settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for douban project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'douban'
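
# Custom settings, not part of the default Scrapy template: the MongoDB host and
# port below only take effect if project code (typically the item pipeline) reads
# them from the crawler settings.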
MONGO_SERVER = '120.77.222.217'
MONGO_PORT = 27017

SPIDER_MODULES = ['douban.spiders']
NEWSPIDER_MODULE = 'douban.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
             'Chrome/65.0.3325.181 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 2

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'douban.middlewares.DoubanSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
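# DoubanDownloaderMiddleware is defined in douban/middlewares.py (not shown in this file).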
DOWNLOADER_MIDDLEWARES = {
    'douban.middlewares.DoubanDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'douban.pipelines.DoubanPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
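
MONGO_SERVER and MONGO_PORT are not standard Scrapy settings, so they do nothing on their own; they have to be read by project code. The project's pipelines.py is not shown here, but below is a minimal sketch of how the DoubanPipeline registered in ITEM_PIPELINES could pick those values up and write items to MongoDB with pymongo. The database name ('douban') and collection name ('movies') are assumptions for illustration only.

# douban/pipelines.py -- illustrative sketch, not the project's actual pipeline
import pymongo


class DoubanPipeline(object):

    def __init__(self, mongo_server, mongo_port):
        self.mongo_server = mongo_server
        self.mongo_port = mongo_port

    @classmethod
    def from_crawler(cls, crawler):
        # Read the custom MONGO_SERVER / MONGO_PORT values defined in settings.py.
        return cls(
            mongo_server=crawler.settings.get('MONGO_SERVER'),
            mongo_port=crawler.settings.getint('MONGO_PORT'),
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_server, self.mongo_port)
        self.db = self.client['douban']           # assumed database name

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        self.db['movies'].insert_one(dict(item))  # assumed collection name
        return item

from_crawler is the standard Scrapy hook for handing settings to a pipeline, which keeps the connection details in settings.py instead of hard-coding them in the pipeline class.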