# middlewares.py
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver
from selenium.common.exceptions import TimeoutException


class Image360SpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
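
# A middleware only takes effect once it is registered in the project's
# settings.py. Below is a minimal sketch of that registration, kept as a
# comment; the module path 'image360.middlewares' is inferred from the class
# names above and the priority value 543 is illustrative, not taken from
# this file:
#
# SPIDER_MIDDLEWARES = {
#     'image360.middlewares.Image360SpiderMiddleware': 543,
# }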


class Image360DownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class TaobaoDownloaderMiddleWare(object):
    def __init__(self, timeout=None):
        self.timeout = timeout
        options = webdriver.ChromeOptions()
        options.add_argument('--headless')
        # Pass the options by keyword: in Selenium 3 the first positional
        # argument of webdriver.Chrome() is the chromedriver path, so passing
        # the options positionally would be a bug there.
        self.browser = webdriver.Chrome(options=options)
        self.browser.set_window_size(1000, 600)
        self.browser.implicitly_wait(10)
        # self.browser.add_cookie({})
        # self.browser.set_page_load_timeout(self.timeout)

    def __del__(self):
        # quit() ends the whole WebDriver session and the driver process;
        # close() would only close the current browser window.
        self.browser.quit()

    def process_request(self, request, spider):
        # Render the page in headless Chrome and return the result directly,
        # short-circuiting Scrapy's own downloader for this request. Note that
        # TimeoutException is only raised if a page load timeout is set; the
        # set_page_load_timeout() call above is currently commented out.
        try:
            self.browser.get(request.url)
            return HtmlResponse(url=request.url, body=self.browser.page_source,
                                request=request, encoding='utf-8', status=200)
        except TimeoutException:
            return HtmlResponse(url=request.url, status=500, request=request)

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    @classmethod
    def from_crawler(cls, crawler):
        # The timeout is hard-coded here; it could instead be read from
        # crawler.settings if it needs to be configurable.
        return cls(timeout=10)
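
# As with the spider middleware above, these downloader middlewares would be
# enabled in settings.py. A minimal sketch, again assuming the hypothetical
# module path 'image360.middlewares'; the priority values are illustrative:
#
# DOWNLOADER_MIDDLEWARES = {
#     'image360.middlewares.Image360DownloaderMiddleware': 543,
#     'image360.middlewares.TaobaoDownloaderMiddleWare': 544,
# }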