目录
class MyScrapyPipeline:
    """Item pipeline that appends each scraped quote to a plain-text file.

    Writes ``author`` and ``text`` separated by blank lines to ``demo2.txt``
    in the current working directory.
    """

    def process_item(self, item, spider):
        """Persist one item and pass it on unchanged.

        The spider fills fields with ``Selector.get()``, which returns
        ``None`` when the XPath matches nothing; concatenating ``None``
        with ``'\n'`` would raise TypeError, so fall back to ''.
        """
        author = item.get('author') or ''
        text = item.get('text') or ''
        # Append mode so items from earlier calls/runs are preserved.
        with open('demo2.txt', 'a', encoding='utf8') as f:
            f.write(author + '\n' + text + '\n\n\n')
        return item

- import scrapy
- from my_scrapy.items import MyScrapyItem
-
class SpiderSpider(scrapy.Spider):
    """Spider that scrapes quotes from quotes.toscrape.com."""

    # Name used by `scrapy crawl`.
    name = 'spider'
    # Domain restriction intentionally disabled in the original notes.
    # allowed_domains = ['https://quotes.toscrape.com/']
    # Seed URL for the first request.
    start_urls = ['https://quotes.toscrape.com//']

    def parse(self, response):
        """Yield one MyScrapyItem per quote box on the page."""
        for quote_sel in response.xpath('//div[@class="quote"]'):
            item = MyScrapyItem()
            # .get() / .getall() are the modern replacements for
            # extract_first() / extract().
            item['text'] = quote_sel.xpath('./span[@class = "text"]/text()').get()
            item['author'] = quote_sel.xpath('.//small[@class="author"]/text()').get()
            item['Tag'] = quote_sel.xpath('.//a[@class="tag"]/text()').getall()
            yield item
- import scrapy
-
-
class MyScrapyItem(scrapy.Item):
    """Container for a single scraped quote."""

    # The quotation text.
    text = scrapy.Field()
    # The person the quote is attributed to.
    author = scrapy.Field()
    # List of tag strings attached to the quote.
    Tag = scrapy.Field()
class MyScrapyPipeline:
    """Item pipeline that appends each scraped quote to a plain-text file.

    Writes ``author`` and ``text`` separated by blank lines to ``demo2.txt``
    in the current working directory.
    """

    def process_item(self, item, spider):
        """Persist one item and pass it on unchanged.

        Fields may be ``None`` (XPath ``.get()`` misses), and
        ``None + '\n'`` would raise TypeError — substitute '' instead.
        """
        author = item.get('author') or ''
        text = item.get('text') or ''
        # Append mode so items from earlier calls/runs are preserved.
        with open('demo2.txt', 'a', encoding='utf8') as f:
            f.write(author + '\n' + text + '\n\n\n')
        return item
# Register the file-writing pipeline. The integer (0-1000) is the run
# order when several pipelines are enabled: lower values run first.
ITEM_PIPELINES = {'my_scrapy.pipelines.MyScrapyPipeline': 300}
from scrapy import cmdline


# Launch the 'spider' crawler programmatically — equivalent to running
# `scrapy crawl spider` from the project directory.
if __name__ == '__main__':
    # Guarded so importing this module no longer starts a crawl as a
    # side effect; pass argv as an explicit list instead of splitting a
    # string that carried a stray trailing space.
    cmdline.execute(['scrapy', 'crawl', 'spider'])