# Scrapy settings for crawler2 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

# NOTE(review): the former import of RANDOMIZE_DOWNLOAD_DELAY, RETRY_TIMES,
# RETRY_HTTP_CODES and DOWNLOADER_MIDDLEWARES from
# scrapy.settings.default_settings was removed: every one of those names is
# unconditionally reassigned below, so the import had no effect at all.

BOT_NAME = "crawler2"

SPIDER_MODULES = ["crawler2.spiders"]
NEWSPIDER_MODULE = "crawler2.spiders"

ADDONS = {}

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = "Mozilla/5.0 (compatible; crawler2/+thomassazeratdev@gmail.com)"

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Politeness: wait about 1 second between requests, randomized by Scrapy
# (0.5x - 1.5x of DOWNLOAD_DELAY) to avoid a detectable fixed cadence.
DOWNLOAD_DELAY = 1
RANDOMIZE_DOWNLOAD_DELAY = True

# Concurrency and throttling settings
CONCURRENT_REQUESTS = 4
CONCURRENT_REQUESTS_PER_DOMAIN = 2
CONCURRENT_REQUESTS_PER_IP = 2

# Error handling: retry failed requests up to 3 times.
# NOTE(review): 400, 403 and 404 are normally permanent client errors and
# retrying them rarely succeeds -- confirm they are intentionally listed.
RETRY_TIMES = 3
RETRY_HTTP_CODES = [500, 502, 503, 504, 400, 403, 404, 408]

DOWNLOADER_MIDDLEWARES = {
    # Disable the default user-agent middleware (USER_AGENT above is used instead)
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    # Enable the retries
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': 90,
}

ITEM_PIPELINES = {
    # Data cleaning runs first...
    'crawler2.pipelines.CleanTextPipeline': 300,
    # ...then persistence to MongoDB. Both entries previously used priority
    # 300, which left their relative execution order undefined; distinct
    # priorities guarantee cleaning happens before storage.
    'crawler2.pipelines.MongoDBPipeline': 400,
}

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#    "Accept-Language": "en",
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    "crawler2.middlewares.Crawler2SpiderMiddleware": 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    "crawler2.middlewares.Crawler2DownloaderMiddleware": 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    "scrapy.extensions.telnet.TelnetConsole": None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    "crawler2.pipelines.Crawler2Pipeline": 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
FEED_EXPORT_ENCODING = "utf-8"