Skip to content

Commit 33d72ba

Browse files
committed
qiushibaike
1 parent 91fea5e commit 33d72ba

File tree

14 files changed

+290
-0
lines changed

14 files changed

+290
-0
lines changed

qiushibaike/qiushibaike/__init__.py

Whitespace-only changes.
156 Bytes
Binary file not shown.
401 Bytes
Binary file not shown.
937 Bytes
Binary file not shown.
534 Bytes
Binary file not shown.

qiushibaike/qiushibaike/items.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
# -*- coding: utf-8 -*-
2+
3+
# Define here the models for your scraped items
4+
#
5+
# See documentation in:
6+
# https://doc.scrapy.org/en/latest/topics/items.html
7+
8+
import scrapy
9+
10+
11+
class QiushibaikeItem(scrapy.Item):
    """One scraped qiushibaike entry (author + joke text)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    author = scrapy.Field()   # author's display name
    content = scrapy.Field()  # body text of the entry
    _id = scrapy.Field()      # presumably the MongoDB document id used by the pipeline -- TODO confirm
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
# -*- coding: utf-8 -*-
2+
3+
# Define here the models for your spider middleware
4+
#
5+
# See documentation in:
6+
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
7+
8+
from scrapy import signals
9+
10+
11+
class QiushibaikeSpiderMiddleware(object):
    """Template spider middleware; every hook currently passes data through unchanged.

    Any method left undefined here would make Scrapy act as if the
    middleware does not modify the corresponding objects at all.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory Scrapy uses to build the middleware; register for the
        # spider_opened signal before handing the instance back.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Runs for each response on its way into the spider. Returning
        # None lets processing continue; raising would abort it.
        return None

    def process_spider_output(self, response, result, spider):
        # Runs over whatever the spider yielded for this response; must
        # produce an iterable of Request, dict or Item objects.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # Invoked when the spider (or an earlier middleware's
        # process_spider_input) raises. None means "not handled here";
        # an iterable of Response/dict/Item would replace the output.
        return None

    def process_start_requests(self, start_requests, spider):
        # Like process_spider_output, but for the spider's start requests,
        # which carry no associated response. Must yield only requests.
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
59+
class QiushibaikeDownloaderMiddleware(object):
    """Template downloader middleware; the default hooks change nothing.

    Methods left undefined would make Scrapy behave as if the middleware
    does not modify the passed objects.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory Scrapy uses to build the middleware; hook up the
        # spider_opened signal before returning the instance.
        instance = cls()
        crawler.signals.connect(instance.spider_opened, signal=signals.spider_opened)
        return instance

    def process_request(self, request, spider):
        # Per-request hook. Options: return None to keep processing,
        # return a Response or a Request to short-circuit, or raise
        # IgnoreRequest to route into process_exception() of installed
        # downloader middlewares.
        return None

    def process_response(self, request, response, spider):
        # Per-response hook: must return a Response, return a Request,
        # or raise IgnoreRequest. Default simply forwards the response.
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or an earlier process_request()
        # raised. None continues the exception chain; returning a
        # Response or a Request would stop it.
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
# -*- coding: utf-8 -*-
2+
3+
# Define your item pipelines here
4+
#
5+
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
6+
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
7+
import pymongo
8+
9+
10+
class QiushibaikePipeline(object):
    """Persist scraped items into the local MongoDB collection scrapy.qiushibaike."""

    def __init__(self):
        # Connection is opened eagerly; Scrapy creates one pipeline
        # instance per crawler, so this runs once per crawl.
        self.connection = pymongo.MongoClient('localhost', 27017)
        self.db = self.connection.scrapy
        self.collection = self.db.qiushibaike

    def process_item(self, item, spider):
        """Store *item* and pass it on.

        Always returns the item (the Scrapy pipeline contract); the
        original implicitly returned None, which starves any pipeline
        configured after this one.
        """
        if self.connection and item:
            doc = dict(item)
            # Collection.save() was deprecated in PyMongo 3 and removed in
            # PyMongo 4; emulate its upsert-by-_id behaviour explicitly.
            if '_id' in doc:
                self.collection.replace_one({'_id': doc['_id']}, doc, upsert=True)
            else:
                self.collection.insert_one(doc)
        return item

    def close_spider(self, spider):
        # Called by Scrapy when the spider finishes -- deterministic
        # cleanup, unlike the original __del__, which may never run (or
        # run during interpreter teardown when closing can fail).
        if self.connection:
            self.connection.close()
            self.connection = None
Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
1+
# -*- coding: utf-8 -*-

# Scrapy settings for the qiushibaike project.
#
# Only commonly tuned settings appear here; the full catalogue lives in:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'qiushibaike'

SPIDER_MODULES = ['qiushibaike.spiders']
NEWSPIDER_MODULE = 'qiushibaike.spiders'

# Keep exported feeds in UTF-8 so non-ASCII text is not escaped.
FEED_EXPORT_ENCODING = 'utf-8'


# Crawl responsibly: identify as a desktop Chromium browser rather than
# Scrapy's default user agent.
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/73.0.3683.86 Chrome/73.0.3683.86 Safari/537.36'

# Honour the target site's robots.txt rules.
ROBOTSTXT_OBEY = True

# Maximum concurrent requests performed by Scrapy (default: 16).
#CONCURRENT_REQUESTS = 32

# Delay between requests to the same website (default: 0).
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# and the AutoThrottle settings below.
#DOWNLOAD_DELAY = 3
# The download delay setting honours only one of these two:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Cookies are enabled by default; uncomment to disable.
#COOKIES_ENABLED = False

# Telnet console is enabled by default; uncomment to disable.
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'qiushibaike.middlewares.QiushibaikeSpiderMiddleware': 543,
#}

# Downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'qiushibaike.middlewares.QiushibaikeDownloaderMiddleware': 543,
#}

# Extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Item pipelines: route every scraped item through the MongoDB pipeline.
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'qiushibaike.pipelines.QiushibaikePipeline': 300,
}

# AutoThrottle extension (disabled by default).
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# Initial download delay:
#AUTOTHROTTLE_START_DELAY = 5
# Maximum download delay under high latency:
#AUTOTHROTTLE_MAX_DELAY = 60
# Average number of requests to send in parallel to each remote server:
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Show throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# HTTP caching (disabled by default).
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
# This package will contain the spiders of your Scrapy project
2+
#
3+
# Please refer to the documentation for information on how to create and manage
4+
# your spiders.

0 commit comments

Comments
 (0)