test4.py · 104 lines (88 loc) · 3.25 KB
import re
import time
from urllib.parse import urljoin

import mysql.connector
import requests
from bs4 import BeautifulSoup
conn = mysql.connector.connect(
    user='root',
    password='123456',
    host='127.0.0.1',
    port=3306,
    database='test'
)
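# Note (an assumption about the schema, not stated in the original): if the
# `test` database and the yq_article columns used charset utf8mb4, 4-byte emoji
# could be stored directly and the filter_emoji() pass below would be
# unnecessary, e.g.:
#   conn = mysql.connector.connect(user='root', password='123456',
#                                  host='127.0.0.1', port=3306,
#                                  database='test', charset='utf8mb4')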
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
def getASection(url, title):
    """Fetch one thread page and insert every post into the yq_article table."""
    cursor = conn.cursor()
    try:
        # f = open(bookName, "a", encoding='utf-8')  # append mode, so later sections don't overwrite earlier ones
        rsp = requests.get(url, headers=headers)
        rsp.encoding = 'utf-8'
        bs = BeautifulSoup(rsp.text, 'html.parser')
        body = bs.select('div.d_post_content.j_d_post_content')
        for bodys in body:
            content = bodys.text
            # Strip emoji so 4-byte characters don't break a utf8 (3-byte) MySQL column
            bodyss = filter_emoji(str(bodys))
            contents = filter_emoji(content)
            times = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            site = '中国政府网'
            data = (title, site, bodyss, times, contents)
            # Parameterized query: the driver escapes the values itself, so
            # pymysql's escape_string is no longer needed
            query = ("INSERT INTO yq_article(article_title,article_site,"
                     "article_contenthtml,article_download_time,article_content)"
                     " VALUES (%s,%s,%s,%s,%s)")
            try:
                cursor.execute(query, data)
                conn.commit()
            except Exception as err:
                print("SQL execution failed:", err, query)
                conn.rollback()
        cursor.close()
        # f.writelines(content)
        # f.close()
    except IndexError as e:
        print("======", e)
    finally:
        print('finally...')
def getarticle(url):
    """Fetch a Tieba forum listing and crawl every thread linked from it."""
    response = requests.get(url, headers=headers)
    # Tieba hides the thread list inside HTML comments; strip the comment
    # markers so BeautifulSoup can see the links
    contents = response.text.replace("<!--", '').replace("-->", '')
    soup = BeautifulSoup(contents, 'lxml')
    urls = soup.find_all("a", attrs={'href': re.compile(r"^/p/\d+")})
    # urls = soup.find_all("a", attrs={'href': True})
    for i in urls:
        post_url = urljoin(response.url, i["href"])
        title = i.get("title", i.text)
        print(post_url, title)
        getASection(post_url, title)
# Strip emoji / astral-plane characters from a string
def filter_emoji(desstr, restr=''):
    try:
        # Wide (UCS-4) build: match astral-plane code points directly
        co = re.compile(u'[\U00010000-\U0010ffff]')
    except re.error:
        # Narrow (UCS-2) build: match surrogate pairs instead
        co = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
    return co.sub(restr, desstr)
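# Example (illustrative input, mirroring the debug string that was commented
# out of getASection): filter_emoji("🙉🙉dsadsadasdas") returns "dsadsadasdas".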
def chachong(a):
    """Duplicate check: report whether a title is already in yq_article."""
    cursor = conn.cursor()
    query = 'SELECT DISTINCT article_title FROM yq_article'
    cursor.execute(query)
    titles = [row[0] for row in cursor.fetchall()]
    cursor.close()
    if a in titles:
        print('1')  # title already stored
    else:
        print('2')  # title not stored yet
#getarticle('https://tieba.baidu.com/f?ie=utf-8&kw=%E6%A8%AA%E5%8E%BF&fr=search')
chachong("【关于横县鱼生】 发誓要做科普")
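# A minimal end-to-end sketch (an addition, not part of the original script):
# crawl the forum listing that is commented out above, check one title for
# duplicates, then close the connection. The main guard keeps the crawl from
# firing if this module is ever imported.
if __name__ == "__main__":
    getarticle('https://tieba.baidu.com/f?ie=utf-8&kw=%E6%A8%AA%E5%8E%BF&fr=search')
    chachong("【关于横县鱼生】 发誓要做科普")
    conn.close()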