from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_readbook.items import ScrapyReadbookItem

class ReadSpider(CrawlSpider):
    name = "read"  # spider name used with `scrapy crawl`; adjust if the project registers a different one
    output_file = "output.txt"

    # Earlier target: book listing pages on dushu.com
    # allowed_domains = ["www.dushu.com"]
    # start_urls = ["https://www.dushu.com/book/1188_1.html"]
    allowed_domains = ["finance.people.com.cn"]
    start_urls = ["http://finance.people.com.cn/n1/2023/1024/c1004-40101769.html"]

    rules = (
        # Earlier experiments, kept for reference:
        # Rule(LinkExtractor(allow=r"/book/1188_\d+\.html"), callback="parse_item", follow=True),
        # Rule(LinkExtractor(allow=r".*"), callback="parse_item", follow=True),
        Rule(
            LinkExtractor(allow=r"http://finance\.people\.com\.cn/n1/2023/1024/c1004-40101769\.html"),
            callback="parse_item",
            follow=False,
        ),
    )
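    # Sketch, based on an assumption about intent: CrawlSpider only applies rule
    # callbacks to links extracted from responses, not to the start_urls responses
    # themselves. If the single start URL above is itself the article to scrape,
    # delegating it to parse_item ensures the page actually gets extracted.
    def parse_start_url(self, response):
        return self.parse_item(response)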
    def parse_item(self, response):
        # Title, publish date, and source all sit in the article header block.
        title = response.xpath('/html/body/div[1]/div[7]/div[1]/h1/text()').extract_first()
        date = response.xpath('/html/body/div[1]/div[7]/div[1]/div[2]/div[1]/text()').extract_first()
        source = response.xpath('/html/body/div[1]/div[7]/div[1]/div[2]/div[1]/a/text()').extract_first()

        # Collect every paragraph of body text and join it into one string.
        # (A narrower alternative: '/html/body/div[1]/div[7]/div[1]/div[3]//p/text()')
        content_list = response.xpath('//p/text()').extract()
        content = '\n'.join(content_list)

        # book = ScrapyReadbookItem(title=title, content=content)

        with open(self.output_file, "a", encoding="utf-8") as file:
            file.write(content + "\n")
            # extract_first() returns a string or None, so guard before writing.
            file.write((source or "") + "\n")
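
# Usage note: run from the scrapy_readbook project root (assuming the spider
# name "read" set above):
#   scrapy crawl read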
