# coding:utf-8
"""Scrape novels from www.xxddxs.com and save each book as one UTF-8 .txt file."""
import os
import re
import time

import requests
import lxml.html
from lxml import etree

# Book index pages to scrape (book ids 1..19).
urls = ['https://www.xxddxs.com/book/{}'.format(i) for i in range(1, 20)]
# Directory the downloaded novels are written into.
path = r'D:\py\venv\Scripts\xiaoshuo'
# path = './'
sleep = 1  # seconds between chapter requests, to avoid hammering the server


def get_text(url):
    """Download every chapter of the book at *url* into one text file.

    The output file is named after the book title (sanitized for the
    filesystem) and written into the module-level ``path`` directory.
    Prints a message and returns early when the page cannot be parsed.
    """
    # Bug fix: original call had no timeout and could hang forever.
    r = requests.get(url, timeout=30)
    r.encoding = 'utf-8'
    selector = etree.HTML(r.text)

    # Get the book title from the page. Bug fix: the original indexed
    # [0] unconditionally and checked len(title) only *after* printing,
    # so a missing/empty <h1> crashed before the guard ran.
    h1_nodes = selector.xpath('//h1')
    if not h1_nodes or not (h1_nodes[0].text or '').strip():
        print(f"No title found for {url}")
        return
    title = h1_nodes[0].text.strip()
    print(title)

    # Derive the last chapter number from the final chapter link's href
    # (e.g. ".../123.html" -> 123). Bug fix: \d+ instead of \d*, which
    # could match an empty string and make int('') raise ValueError.
    chapter_list = selector.xpath('/html/body/div[5]/dl/dd/a')
    if not chapter_list:
        print(f"No chapters found for {url}")
        return
    m = re.search(r'(\d+)\.html$', chapter_list[-1].attrib.get('href', ''))
    if m is None:
        print(f"Cannot determine chapter count for {url}")
        return
    last_chapter_num = int(m.group(1))

    # Bug fix: the original defined `path` but wrote into the CWD.
    # Also strip characters that are invalid in Windows filenames.
    safe_title = re.sub(r'[\\/:*?"<>|]', '_', title)
    os.makedirs(path, exist_ok=True)
    with open(os.path.join(path, safe_title + ".txt"), 'w', encoding='utf-8') as f:
        for num in range(1, last_chapter_num + 1):
            content_url = url + f'/{num}.html'
            print(content_url)
            time.sleep(sleep)  # 防止请求量过多导致网页拒绝访问
            try:
                r = requests.get(content_url, timeout=30)
            except requests.RequestException:
                # Bug fix: bare except also swallowed KeyboardInterrupt;
                # catch only network-level failures, then retry once.
                print('连接失败,重试一次')
                r = requests.get(content_url, timeout=30)
            r.encoding = 'utf-8'
            content_dom = lxml.html.fromstring(r.text)
            nodes = content_dom.xpath('//*[@id="chaptercontent"]')
            if not nodes:
                # Guard the [0] index: skip chapters with unexpected markup.
                print(f"No content found at {content_url}")
                continue
            f.write(nodes[0].text_content())


if __name__ == '__main__':
    for url in urls:
        get_text(url)
时间: 2024-04-02 11:36:16 浏览: 144
这个代码的运行思路如下:
1. 定义需要爬取小说的URL列表和保存小说的文件路径
2. 定义一个函数 `get_text` 用来获取小说内容,其中包括小说标题,小说章节信息和小说正文
3. 在 `get_text` 函数中,先通过URL获取小说页面的HTML文本,然后使用XPath解析HTML文本获取小说的标题和章节信息
4. 然后通过正则表达式获取小说最新章节的编号,并根据章节编号遍历获取每一章节的正文
5. 最后将小说的标题和正文保存到指定的文件路径中
6. 在主程序中,循环遍历URL列表,调用 `get_text` 函数来获取每个小说的内容并保存到文件中。
阅读全文
相关推荐





