Scrapy Rules: Exclude certain URLs with process_links - python-3.x

I am very happy to have discovered the Scrapy CrawlSpider class with its Rule objects. However, when I try to exclude URLs that contain the word "login" with process_links, it doesn't work. The solution I implemented comes from here: Example code for Scrapy process_links and process_request, but it doesn't exclude the pages I want.
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.loader import ItemLoader
from accenture.items import AccentureItem


class AccentureSpiderSpider(CrawlSpider):
    name = 'accenture_spider'
    start_urls = ['https://www.accenture.com/us-en/internet-of-things-index']

    rules = (
        Rule(LinkExtractor(restrict_xpaths='//a[contains(@href, "insight")]'),
             callback='parse_item', process_links='process_links', follow=True),
    )

    def process_links(self, links):
        for link in links:
            if 'login' in link.text:
                continue  # skip all links that have "login" in their text
            yield link

    def parse_item(self, response):
        loader = ItemLoader(item=AccentureItem(), response=response)
        url = response.url
        loader.add_value('url', url)
        yield loader.load_item()

My mistake was to use link.text. When using link.url instead, it works fine :)
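For reference, a minimal sketch of the corrected process_links (the rest of the spider stays the same), filtering on link.url instead of link.text:

def process_links(self, links):
    for link in links:
        if 'login' in link.url:
            continue  # skip links whose URL contains "login"
        yield link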

Related

Scrapy CrawlSpider next page isn't working

I want to scrape all items from each card. The first rule works fine, but the second rule (the pagination rule) is not working.
This is my code:
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class RealtorListSpider(CrawlSpider):
    name = 'realtor_list'
    allowed_domains = ['www.realtor.com']
    start_urls = ['https://www.realtor.com/realestateagents/New-Orleans_LA/pg-1']

    rules = (
        Rule(LinkExtractor(restrict_xpaths='//*[@data-testid="component-agentCard"]'), callback='parse_item', follow=False),
        Rule(LinkExtractor(restrict_xpaths='//a[@aria-label="Go to next page"]'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        yield {
            'name': response.xpath('(//*[@class="jsx-3130164309 profile-Tiltle-main"]/text())[2]').get()
        }
The problem is in the element selection of your link extractor, not in the pagination rule itself. The pagination XPath doesn't give the extractor a link it can follow, while the card selection is correct, so I built the pagination into the start URLs instead and it works fine.
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class RealtorListSpider(CrawlSpider):
    name = 'realtor_list'
    allowed_domains = ['www.realtor.com']
    start_urls = ['https://www.realtor.com/realestateagents/New-Orleans_LA/pg-' + str(x) for x in range(1, 6)]

    rules = (
        Rule(LinkExtractor(restrict_xpaths='//*[@data-testid="component-agentCard"]'), callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        yield {
            'name': response.xpath('(//*[@class="jsx-3130164309 profile-Tiltle-main"]/text())[2]').get()
        }
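If you would rather keep Rule-based pagination instead of pre-building the start URLs, here is a possible variant (untested; it only helps if the "Go to next page" anchor and its href are present in the HTML Scrapy actually receives, i.e. not injected by JavaScript). The pagination rule only follows, with no callback:

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class RealtorListRulesSpider(CrawlSpider):
    name = 'realtor_list_rules'
    allowed_domains = ['www.realtor.com']
    start_urls = ['https://www.realtor.com/realestateagents/New-Orleans_LA/pg-1']

    rules = (
        # agent cards: scrape them, do not crawl further from them
        Rule(LinkExtractor(restrict_xpaths='//*[@data-testid="component-agentCard"]'),
             callback='parse_item', follow=False),
        # pagination: only follow, no callback needed
        Rule(LinkExtractor(restrict_xpaths='//a[@aria-label="Go to next page"]'), follow=True),
    )

    def parse_item(self, response):
        yield {
            'name': response.xpath('(//*[@class="jsx-3130164309 profile-Tiltle-main"]/text())[2]').get()
        }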

How to use proxy in scrapy crawler?

from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scraper_api import ScraperAPIClient


class Spider(CrawlSpider):
    allowed_domains = ['example.com']
    client = ScraperAPIClient('xyz')

    # Initialize method for taking arguments from the user
    def __init__(self, category=None, location=None, **kwargs):
        # self.start_urls = [client.scrapyGet(url='http://example.com')]
        super().__init__(**kwargs)  # python3

    rules = (
        Rule(LinkExtractor(restrict_xpaths="//div[contains(@class,'on-click-container')]/a[contains(@href, '/biz/')]"),
             callback='parse_item', follow=True, process_request='set_proxy'),
        # for next page
        Rule(LinkExtractor(restrict_xpaths='//a[contains(@class,"next-link")]'), process_request='set_proxy'),
    )

    def set_proxy(self, request):
        pass

    def parse_item(self, response):
        # contains data
        yield {
            # BUSINESS INFORMATION
            "Company Name": response.xpath('//title').extract_first(),
        }
What I don't understand is how to write the set_proxy function so that requests are sent through the ScraperAPI server. Please help with this.
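One possible sketch of set_proxy (assuming the ScraperAPIClient.scrapyGet helper shown in the commented-out start_urls above returns a proxied URL for the page you pass it) is to rewrite each request's URL:

def set_proxy(self, request):
    # Sketch: send the request through ScraperAPI by swapping in the proxied URL.
    # Assumes self.client is the ScraperAPIClient created above and that
    # scrapyGet(url=...) wraps the original URL so it is fetched via the API.
    return request.replace(url=self.client.scrapyGet(url=request.url))

Note that the rewritten requests point at ScraperAPI's domain rather than example.com, so you may also need to relax or remove allowed_domains to keep them from being filtered as offsite.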

No module named 'scrapy.Spider' found

I am trying to execute the code below using the latest version of Scrapy, and I don't know what is happening.
import scrapy
from scrapy.Spider import Basespider


class crawler(Basespider):
    name = "crawler"
    allowed_domains = ['google.com']
    start_urls = ["https://www.google.com"]

    def parse(self, response):
        hxs = Selector(response)
BaseSpider is from Scrapy 0.16.5. If you have the newest version, use another spider class; this one is obsolete.
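A minimal sketch of the same spider against the current API, where scrapy.Spider replaces the old BaseSpider and no separate Selector is needed because the response exposes .xpath()/.css() directly:

import scrapy


class CrawlerSpider(scrapy.Spider):
    name = "crawler"
    allowed_domains = ['google.com']
    start_urls = ["https://www.google.com"]

    def parse(self, response):
        # response supports .xpath()/.css() directly; no Selector(response) needed
        yield {'title': response.xpath('//title/text()').get()}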

Extract articles from its corresponding links from a webpage using scrapy

Hi, I am new to Scrapy and I am trying to extract text from the links on a given webpage. Here is the code I wrote for this; after running scrapy crawl article, it gives "no module named article". Can you help me find where I am wrong? Thanks in advance.
import scrapy
from urllib.parse import urljoin


class ArticleSpider(scrapy.Spider):
    name = 'article'
    allowed_domains = ['www.timesofindia.indiatimes.com/business']
    start_urls = ['https://timesofindia.indiatimes.com/business']

    def parse(self, response):
        links = response.css('span.w_tle a::attr(href)').extract()
        for link in links:
            url = urljoin(response.url, link)
            yield scrapy.Request(url, callback=self.parse_article)

    def parse_article(self, response):
        for info in response.css('div.article_content clearfix'):
            yield {'Article': info.css('div.Normal::text').extract()}
If you take a look at your log you'll see 'offsite/filtered': 211, and that is the cause of not getting anything. To get around this you can do one of two things:
Remove the allowed_domains field
Add dont_filter=True to your request, like:
yield scrapy.Request(url, callback=self.parse_article, dont_filter=True)
I tested your code and it does not seem to work properly if you want to get the text body, so I rewrote it with XPath, which I am more comfortable with.
import scrapy
from urllib.parse import urljoin


class ArticleSpider(scrapy.Spider):
    name = 'article'
    allowed_domains = ['www.timesofindia.indiatimes.com']
    start_urls = ['https://timesofindia.indiatimes.com/business']

    def parse(self, response):
        links = response.xpath('//*[@id="c_listing_wdt_1"]//span[1]/a/@href').getall()
        for link in links:
            url = urljoin(response.url, link)
            yield scrapy.Request(url, callback=self.parse_article, dont_filter=True)

    def parse_article(self, response):
        print(response.xpath('//*[@id="content"]//arttextxml//div//text()').getall())
        for info in response.xpath('//*[@id="content"]//arttextxml//div//text()').getall():
            yield {'Article': info}
getall() can be used instead of extract(); they are almost equivalent.

Downloading files with ItemLoaders() in Scrapy

I created a crawl spider to download files. However, the spider downloaded only the URLs of the files and not the files themselves. I uploaded a question here: Scrapy crawl spider does not download files? While the basic yield spider kindly suggested in the answers works perfectly, when I attempt to download the files with items or item loaders the spider does not work! The original question does not include the items.py, so here it is:
ITEMS
import scrapy
from scrapy.item import Item, Field


class DepositsusaItem(Item):
    # main fields
    name = Field()
    file_urls = Field()
    files = Field()
    # housekeeping fields
    url = Field()
    project = Field()
    spider = Field()
    server = Field()
    date = Field()
EDIT: added original code
EDIT: further corrections
SPIDER
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
import datetime
import socket
from us_deposits.items import DepositsusaItem
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose
from urllib.parse import urljoin


class DepositsSpider(CrawlSpider):
    name = 'deposits'
    allowed_domains = ['doi.org']
    start_urls = ['https://minerals.usgs.gov/science/mineral-deposit-database/#products', ]

    rules = (
        Rule(LinkExtractor(restrict_xpaths='//*[@id="products"][1]/p/a'),
             callback='parse_x'),
    )

    def parse_x(self, response):
        i = ItemLoader(item=DepositsusaItem(), response=response)
        i.add_xpath('name', '//*[@class="container"][1]/header/h1/text()')
        i.add_xpath('file_urls', '//span[starts-with(@data-url, "/catalog/file/get/")]/@data-url',
                    MapCompose(lambda i: urljoin(response.url, i))
                    )
        i.add_value('url', response.url)
        i.add_value('project', self.settings.get('BOT_NAME'))
        i.add_value('spider', self.name)
        i.add_value('server', socket.gethostname())
        i.add_value('date', datetime.datetime.now())
        return i.load_item()
SETTINGS
BOT_NAME = 'us_deposits'

SPIDER_MODULES = ['us_deposits.spiders']
NEWSPIDER_MODULE = 'us_deposits.spiders'

ROBOTSTXT_OBEY = False

ITEM_PIPELINES = {
    'us_deposits.pipelines.UsDepositsPipeline': 1,
    'us_deposits.pipelines.FilesPipeline': 2,
}
FILES_STORE = 'C:/Users/User/Documents/Python WebCrawling Learning Projects'
PIPELINES
class UsDepositsPipeline(object):
    def process_item(self, item, spider):
        return item


class FilesPipeline(object):
    def process_item(self, item, spider):
        return item
It seems to me that using items and/or item loaders has nothing to do with your problem.
The only problems I see are in your settings file:
Scrapy's built-in FilesPipeline is not activated (only your own pipelines from us_deposits.pipelines are, and those just return the item unchanged)
FILES_STORE should be a string, not a set (an exception is raised when you activate the files pipeline)
ROBOTSTXT_OBEY = True will prevent the downloading of files
If I correct all of those issues, the file download works as expected.
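For reference, a possible corrected settings block (a sketch, assuming the intent is to use Scrapy's built-in scrapy.pipelines.files.FilesPipeline rather than the no-op us_deposits.pipelines.FilesPipeline):

BOT_NAME = 'us_deposits'

SPIDER_MODULES = ['us_deposits.spiders']
NEWSPIDER_MODULE = 'us_deposits.spiders'

ROBOTSTXT_OBEY = False

ITEM_PIPELINES = {
    'us_deposits.pipelines.UsDepositsPipeline': 1,
    # enable Scrapy's built-in files pipeline so the URLs in file_urls are downloaded
    'scrapy.pipelines.files.FilesPipeline': 2,
}
# FILES_STORE must be a plain string path to the download directory
FILES_STORE = 'C:/Users/User/Documents/Python WebCrawling Learning Projects'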
