JS+Selenium+excel追加写入,使用python成功爬取京东任何商品~

之前一直是requests库做爬虫,这次尝试下使用selenium做爬虫,效率不高,但是却没有限制,文章是分别结合大牛的selenium爬虫以及excel追加写入操作而成,还有待优化,计划爬取更多信息后进行词云分析

coding++:@DisallowConcurrentExecution 注解的作用

'''
爬取京东商品信息:
    请求url:
        https://www.jd.com/
    提取商品信息:
        1.商品详情页
        2.商品名称
        3.商品价格
        4.评价人数
        5.商品商家
'''
# coding=UTF-8
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import xlrd
import xlwt
from xlutils.copy import copy


def write_excel_xls(path, sheet_name, value):
    """Create a new .xls workbook at *path* with a single sheet named
    *sheet_name* and write the 2-D list *value* into it row by row.

    Overwrites any existing file at *path*.
    """
    workbook = xlwt.Workbook()          # fresh workbook
    sheet = workbook.add_sheet(sheet_name)
    for row_idx, row in enumerate(value):
        for col_idx, cell in enumerate(row):
            sheet.write(row_idx, col_idx, cell)
    workbook.save(path)                 # flush to disk
    print("xls花样表格写入数据乐成!")


def write_excel_xls_append(path, value):
    """Append the 2-D list *value* to the first sheet of the existing
    .xls workbook at *path*, starting after the last populated row.

    xlrd can only read and xlwt can only write, so the workbook is
    re-opened read-only, copied into a writable object, and re-saved.
    """
    reader = xlrd.open_workbook(path)
    first_sheet = reader.sheet_by_name(reader.sheet_names()[0])
    start_row = first_sheet.nrows       # append below existing data
    writable = copy(reader)             # xlrd object -> xlwt object
    target = writable.get_sheet(0)
    for row_idx, row in enumerate(value):
        for col_idx, cell in enumerate(row):
            target.write(start_row + row_idx, col_idx, cell)
    writable.save(path)
    print("xls花样表格【追加】写入数据乐成!")


def read_excel_xls(path):
    """Print every cell of the first sheet of the .xls workbook at
    *path*, tab-separated, one sheet row per output line."""
    workbook = xlrd.open_workbook(path)
    sheet = workbook.sheet_by_name(workbook.sheet_names()[0])
    for row in range(sheet.nrows):
        for col in range(sheet.ncols):
            print(sheet.cell_value(row, col), "\t", end="")
        print()  # newline after each row

def get_good(driver):
    """Scrape the JD search-result page currently loaded in *driver*.

    Scrolls down to trigger lazy loading, then collects one
    ``[url, name, price, comment_count]`` row per product card.

    Returns:
        list[list[str]]: one row per ``.gl-item`` element on the page.
    """
    # Scroll the window so lazily-loaded product cards are rendered.
    js_code = '''
            window.scrollTo(0,5000);
        '''
    driver.execute_script(js_code)
    time.sleep(2)  # give the page time to load the extra items

    rows = []
    for item in driver.find_elements_by_class_name('gl-item'):
        # Product detail-page link.
        url = item.find_element_by_css_selector(
            '.p-img a').get_attribute('href')
        # Product title (newlines flattened for single-cell storage).
        name = item.find_element_by_css_selector(
            '.p-name em').text.replace("\n", "--")
        # Price text.
        price = item.find_element_by_class_name(
            'p-price').text.replace("\n", ":")
        # Number of reviews.
        commit = item.find_element_by_class_name(
            'p-commit').text.replace("\n", " ")
        rows.append([url, name, price, commit])
    return rows


if __name__ == '__main__':
    # Interactive entry point: ask for a keyword and a page count, then
    # scrape that many result pages from jd.com into '<keyword>.xls'.
    good_name = input('请输入爬取商品信息:').strip()
    num = int(input('请输入要爬取的页数:'))
    driver = webdriver.Chrome()
    driver.implicitly_wait(10)  # implicit wait for every element lookup
    try:
        # 1. Open the JD home page.
        driver.get('https://www.jd.com/')

        # 2. Type the keyword into the search box and press Enter.
        input_tag = driver.find_element_by_id('key')
        input_tag.send_keys(good_name)
        input_tag.send_keys(Keys.ENTER)
        time.sleep(2)

        # Sort the results by number of comments.
        # BUGFIX: the link label on JD's result page is '评论数'; the
        # original garbled text '谈论数' matched nothing and raised
        # NoSuchElementException.
        driver.find_element_by_link_text('评论数').click()
        time.sleep(2)

        # Create the workbook with a header row, then append one batch
        # of rows per scraped page.
        book_name_xls = good_name + '.xls'
        sheet_name_xls = good_name
        value_title = [["商品链接", "商品名称", "商品价格", "评价人数"], ]
        write_excel_xls(book_name_xls, sheet_name_xls, value_title)
        for i in range(0, num):
            value = get_good(driver)
            write_excel_xls_append(book_name_xls, value)
            next_tag = driver.find_element_by_class_name('pn-next')
            next_tag.click()
            time.sleep(2)
            read_excel_xls(book_name_xls)  # echo progress to stdout
    finally:
        # BUGFIX: quit() (not close()) terminates the chromedriver
        # process too, and the try/finally guarantees cleanup even when
        # scraping fails partway through.
        driver.quit()

 

原创文章,作者:admin,如若转载,请注明出处:https://www.2lxm.com/archives/6506.html