# -*- coding: utf-8 -*-
# Scrapy settings for the douban project.
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'douban'

SPIDER_MODULES = ['douban.spiders']
NEWSPIDER_MODULE = 'douban.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent.
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.54 Safari/536.5'

# Obey robots.txt rules.
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16).
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0).
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs.
# The actual delay is randomized between 0.5 * and 1.5 * DOWNLOAD_DELAY
# because RANDOMIZE_DOWNLOAD_DELAY is on.
DOWNLOAD_DELAY = 3
RANDOMIZE_DOWNLOAD_DELAY = True

# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Cookies are kept enabled (this is also the Scrapy default).
COOKIES_ENABLED = True

# MongoDB connection parameters consumed by the item pipeline.
# NOTE(review): host/port are hard-coded to a public IP with no credentials;
# consider moving these to environment variables — TODO confirm with ops.
MONGODB_SERVER = '120.77.222.217'
MONGODB_PORT = 27017
MONGODB_DB = 'douban'
MONGODB_COLLECTION = 'movie'

# Disable Telnet Console (enabled by default).
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#    'Accept-Language': 'en',
#}

# Enable or disable spider middlewares.
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'douban.middlewares.DoubanSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares.
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'douban.middlewares.DoubanDownloaderMiddleware': 543,
#}

# Enable or disable extensions.
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines.
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'douban.pipelines.DoubanPipeline': 400,
}

LOG_LEVEL = 'DEBUG'

# Enable and configure the AutoThrottle extension (disabled by default).
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay:
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies:
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server:
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default).
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'