# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

import base64
import random

from scrapy import signals

from xiaowu.settings import PROXIES


class XiaowuSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class XiaowuDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        #
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        #
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        #
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


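# Scrapy only runs the two generated classes above if they are enabled in the
# project settings. A minimal sketch of that registration (the module path
# 'xiaowu.middlewares' and the priority values are assumptions based on the
# project layout, not taken verbatim from this project's settings.py):
#
#     SPIDER_MIDDLEWARES = {
#         'xiaowu.middlewares.XiaowuSpiderMiddleware': 543,
#     }
#     DOWNLOADER_MIDDLEWARES = {
#         'xiaowu.middlewares.XiaowuDownloaderMiddleware': 543,
#     }

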
class RandomUserAgent(object):
    def __init__(self, agents):
        # Keep the list of user-agent strings handed in by from_crawler().
        self.agents = agents

    @classmethod
    def from_crawler(cls, crawler):
        # Read the USER_AGENTS list from the project settings.
        return cls(crawler.settings.getlist('USER_AGENTS'))

    def process_request(self, request, spider):
        # Set a random User-Agent on the outgoing request's headers.
        request.headers.setdefault('User-Agent', random.choice(self.agents))


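# RandomUserAgent expects a USER_AGENTS list in settings.py. A minimal sketch,
# assuming this project keeps its user-agent strings there (the strings below
# are illustrative placeholders, not values from the project):
#
#     USER_AGENTS = [
#         'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
#         'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.2 Safari/605.1.15',
#     ]
#
# Because process_request() uses headers.setdefault(), a request that already
# carries an explicit User-Agent keeps it; only requests without one get a
# random value from this list.

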
# Randomly use a proxy from the predefined PROXIES list.
class ProxyMiddleware(object):
    def process_request(self, request, spider):
        # Pick a random proxy from the PROXIES list imported from settings.
        proxy = random.choice(PROXIES)
        if proxy['user_pass'] is not None:
            # The proxy entry has credentials: route the request through it.
            request.meta['proxy'] = "http://%s" % proxy['ip_port']
            # # Base64-encode the "user:password" string (b64encode works on
            # # bytes, so encode before and decode after).
            # encoded_user_pass = base64.b64encode(proxy['user_pass'].encode()).decode()
            # # Attach it as the HTTP proxy authorization header.
            # request.headers['Proxy-Authorization'] = 'Basic ' + encoded_user_pass
        else:
            # No credentials for this entry: report it and still use the
            # proxy without authentication.
            print("**** proxy without credentials ****" + proxy['ip_port'])
            request.meta['proxy'] = "http://%s" % proxy['ip_port']


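# ProxyMiddleware reads PROXIES from xiaowu/settings.py. A minimal sketch of
# the structure it assumes ('ip_port' and 'user_pass' are the keys read above;
# the concrete addresses and credentials are placeholders), together with the
# registration that turns both custom middlewares on:
#
#     PROXIES = [
#         {'ip_port': '1.2.3.4:8080', 'user_pass': 'user:password'},
#         {'ip_port': '5.6.7.8:3128', 'user_pass': None},
#     ]
#
#     DOWNLOADER_MIDDLEWARES = {
#         'xiaowu.middlewares.RandomUserAgent': 543,
#         'xiaowu.middlewares.ProxyMiddleware': 544,
#     }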