import requests
from lxml import etree
import re
import xlwt
from openpyxl import workbook # 写入Excel表所用
from openpyxl import load_workbook # 读取excel
# import matplotlib.pylab as plt
from xlrd import book
# Request headers: a real browser User-Agent plus a session Cookie so
# Amazon serves the review page instead of a bot-detection page.
# NOTE(review): the Cookie is session-bound and will expire — refresh it
# before running.
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0',
'Cookie': 'x-wl-uid=1DVw4k4T/jAduWIfwW2jvf029Ha4Bgv/AJGjP/yRfJTdq26dr7oDdeEBdb6zOPUl0ByfsaKJ3GUY=; session-id-time=2082729601l; session-id=457-7649276-4174543; csm-hit=tb:DAHATSQRZZBWHWD4ZXYP+s-T61YJHRDEC6Y6S2VMTVZ|1573355007668&t:1573355007668&adb:adblk_no; ubid-acbcn=459-2457809-1906210; session-token="4sZGQQPKw9CJUOzJFLsTdS3FtlpqIyp0hyvhXL6RMOchbDf7p7YLDEL90YFps2Hl80fBT6uPmzQ00meCLYxsrjuoabX3+kz7OB+CLw8GaAYZB8J9oBBcJLBUsGs6LLm/EHQht5Tm0IpOKR0hz0GGtATgcpJXDfRoEdvNol+CUc3mXOMA5KmEfFWstdV+KwyzSGrGW+DdrAftisgZMl2stffIdhcOLh53B4tJwsR5awKqPrOqZF8uJg=="; lc-acbcn=zh_CN; i18n-prefs=CNY'
} # headers mimic a real browser so the scraper is not blocked
hao = []   # good-review ratings (only used by the commented-out bucketing below)
zhong = [] # middling-review ratings (likewise unused at present)
cha = [] # poor-review ratings collected here (likewise unused at present)
all_info_list = [] # one [body, color, rating, date] row per parsed review
def parge_page(url):
    """Fetch one Amazon product-review page and collect its reviews.

    :param url: URL of a single review page (``/product-reviews/<ASIN>/...``).
    :return: None.  Side effect: appends one ``[body, color, rating, date]``
        row per review to the module-level ``all_info_list``.
    """
    response = requests.get(url=url, headers=headers)
    html = etree.HTML(response.text)
    # One <div> per reviewer inside the review-list container.
    reviews = html.xpath('//div[@id="cm_cr-review_list"]/div')
    for review in reviews:
        # Star rating text, e.g. "5.0 颗星" ("5.0 stars").
        rating_texts = review.xpath('.//span[@class="a-icon-alt"]/text()')
        body = review.xpath('.//span[@data-hook="review-body"]/span/text()')
        date = review.xpath('.//span[@data-hook="review-date"]/text()')
        color = review.xpath('.//a[@ data-hook="format-strip"]/text()')
        for rating_text in rating_texts:
            # Strip the trailing "颗星" (stars) suffix, leaving just "5.0".
            rating = re.sub('颗星', '', rating_text)
            record = {'评论': body, '颜色和尺寸': color, '评分': rating, '日期': date}
            print(record)
            # NOTE(review): body/color/date are stored as raw xpath result
            # lists, matching the original behavior — confirm whether single
            # strings were intended before changing.
            all_info_list.append([body, color, rating, date])
def main():
    """Scrape 10 pages (10 reviews each, ~100 total) for one product."""
    for page in range(10):
        url = 'https://www.amazon.com/product-reviews/B07XGK5QXD/?pageNumber=' + str(page)
        # BUG FIX: the original built the URL but never fetched it, so no
        # reviews were ever collected.
        parge_page(url)
if __name__ == '__main__':
    main()  # scrape the reviews into all_info_list
    # Dump the collected rows to an Excel workbook.
    # NOTE(review): xlwt writes legacy .xls content; saving it under a
    # .xlsx name (kept from the original) may make Excel complain — consider
    # openpyxl if a true .xlsx is required.
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('sheet1')
    head = ['评论', '颜色和尺寸', '评分', '日期']  # header row
    for col, title in enumerate(head):
        sheet.write(0, col, title)
    # Data rows start at row 1, directly under the header.
    for row, record in enumerate(all_info_list, start=1):
        for col, value in enumerate(record):
            sheet.write(row, col, value)
    book.save('评论信息.xlsx')
    print('完成', book)
# 如果还想爬取其他信息,自己添加Xpath! (To scrape additional fields, add your own XPath expressions.)
import pandas as pd

# Load the previously exported review spreadsheet for cleaning.
data_path = '8-8026.xlsx'
# FIX: pandas removed read_excel's ``encoding`` argument (it never applied
# to .xlsx files anyway); passing it raises TypeError on modern pandas.
df = pd.read_excel(data_path)
print(len(df))   # row count before cleaning
print(type(df))  # pandas.DataFrame
df = df.dropna() # drop rows containing missing values (NaN)
print('清除缺失数据后:')
print(len(df))
print(type(df))
# Deduplicate on the first column; iloc[:, 0] selects column 0 of every row.
df = pd.DataFrame(df.iloc[:, 0].unique())
print(len(df))
def str_unique(raw_str, reverse=False):
    """Collapse repeated characters, keeping each one's first occurrence.

    E.g. "我喜欢喜欢喜欢喜欢该商品" -> "我喜欢该商品" (drops the repeated "喜欢").

    :param raw_str: input string
    :param reverse: if True, process the string back-to-front, i.e. keep
        each character's *last* occurrence instead of its first
    :return: the deduplicated string
    """
    text = raw_str[::-1] if reverse else raw_str
    # dict.fromkeys preserves insertion order, so joining its keys keeps
    # the first occurrence of every character.
    deduped = ''.join(dict.fromkeys(text))
    return deduped[::-1] if reverse else deduped
# Mechanical word compression: run str_unique left-to-right, then
# right-to-left, so repeats at either end of a comment are collapsed.
# Selecting a single column with iloc[:, 0] yields a Series.
forward_pass = df.iloc[:, 0].apply(str_unique)
df2 = pd.DataFrame(forward_pass.apply(str_unique, reverse=True))
print('机械压缩去词后:')
print(len(df2))
print(type(df2))
print('------------------')
# Short-sentence filter: keep only comments at least 4 characters long.
df3 = df2[df2.iloc[:, 0].apply(len) >= 4]
print('短句过滤后:')
print(len(df3))
print('------------------')
from snownlp import SnowNLP  # sentiment-analysis NLP library

# SnowNLP's ``sentiments`` is the probability that the text is positive:
# the closer to 1, the more positive the comment.
coms = df3.iloc[:, 0].apply(lambda x: SnowNLP(x).sentiments)
positive_df = df3[coms >= 0.9]  # positive reviews
# NOTE(review): the source file was truncated mid-expression on the next
# line; a cutoff symmetric with the positive threshold is assumed here —
# confirm against the original intent.
negative_df = df3[coms < 0.1]   # negative reviews (assumed threshold)
# (web-page extraction artifacts removed here: a bare timestamp "1658128969"
# and the link text "查看更多评论" ("view more comments") — not code.)