I'm trying to scrape the news website "https://inshorts.com/en/read/national", fetching article results under the headings 'headlines' and 'news'. I need all the articles across the pages that contain a specific word (e.g., "health"), and I also want to add a 'date' heading.
Here's my code:
import requests
from bs4 import BeautifulSoup
import pandas as pd
from tqdm import tqdm
import re
# code for scraping the first page
d={'headlines':[],'news':[], 'date':[]}
r = requests.get("https://inshorts.com/en/read/national")
soup = BeautifulSoup(r.content, 'html.parser')
min_news_id = soup.findAll("script",{"type":"text/javascript"})[2].text
min_news_id = min_news_id[25:35]
soup=soup.findAll("div",{"class":"news-card z-depth-1"})
#to search specific word in the content
soup = soup.find_all(text=re.compile("Health"))
for data in soup:
    d['headlines'].append(data.find(itemprop="headline").getText())
    d['news'].append(data.find(itemprop="articleBody").getText())
    d['date'].append(data.find(itemprop="date").getText())
# code for scraping more pages
for i in tqdm(range(10)):
    # It uses JavaScript to load more data from
    # https://inshorts.com/en/ajax/more_news using POST requests
    # with parameter 'news_offset' which informs server what page
    # it has to send to client.
    # we can make POST requests with this parameter to get new
    # data in JSON format
    try:
        params = {'news_offset': min_news_id}
        req = requests.post("https://inshorts.com/en/ajax/more_news", data=params)
        # In JSON you have HTML in json_data['html'] and
        # json_data['min_news_id'] for next page
        json_data = req.json()
        min_news_id = json_data['min_news_id']
        soup = BeautifulSoup(json_data['html'], 'html.parser')
        soup = soup.findAll("div", {"class": "news-card z-depth-1"})
        for data in soup:
            d['headlines'].append(data.find(itemprop="headline").getText())
            d['news'].append(data.find(itemprop="articleBody").getText())
            d['date'].append(data.find(itemprop="date").getText())
    except:
        pass
# storing the data into .csv file
df = pd.DataFrame(d)
df.to_csv("inshorts_news.csv", index=False)
And here's the error:
AttributeError Traceback (most recent call last)
<ipython-input-2-2d109f9dfc91> in <module>()
12
13 #to search specific word in the content
---> 14 soup = soup.find_all(text=re.compile("Health"))
15
16 for data in soup:
/usr/local/lib/python3.7/dist-packages/bs4/element.py in __getattr__(self, key)
1882 def __getattr__(self, key):
1883 raise AttributeError(
-> 1884 "ResultSet object has no attribute '%s'. You're probably treating a list of items like a single item. Did you call find_all() when you meant to call find()?" % key
1885 )
AttributeError: ResultSet object has no attribute 'find_all'. You're probably treating a list of items like a single item. Did you call find_all() when you meant to call find()?
What happens?
As the error tells you, you are calling find_all() on a ResultSet object; that won't work.
How to fix?
Iterate over the elements of the object and check there for your keyword:
for data in soup.select('div.news-card.z-depth-1'):
    if data.find(text=re.compile("farmer")):
Example (this searches for "farmer"; substitute your own keyword, e.g. "health")
import requests
from bs4 import BeautifulSoup
import pandas as pd
from tqdm import tqdm
import re
# code for scraping the first page
d=[]
r = requests.get("https://inshorts.com/en/read/national")
soup = BeautifulSoup(r.content, 'html.parser')
min_news_id = soup.findAll("script",{"type":"text/javascript"})[2].text
min_news_id = min_news_id[25:35]
# code for scraping more pages
for i in tqdm(range(2)):
    try:
        params = {'news_offset': min_news_id}
        req = requests.post("https://inshorts.com/en/ajax/more_news", data=params)
        json_data = req.json()
        min_news_id = json_data['min_news_id']
        soup = BeautifulSoup(json_data['html'], 'html.parser')
        for data in soup.select('div.news-card.z-depth-1'):
            if data.find(text=re.compile("farmer")):
                d.append({
                    'headline': data.find(itemprop="headline").getText(),
                    'article': data.find(itemprop="articleBody").getText()
                })
    except Exception as e:
        print(e)
pd.DataFrame(d)
Output
headline article
0 Heavy traffic seen on DND Flyway at Noida toll... Heavy traffic was witnessed on Delhi Noida Dir...
1 Farmers take out protest march in Haryana over... Farmers have taken out a protest march in Hary...
2 Akhilesh Yadav detained in Lucknow after sit-i... Samajwadi Party President Akhilesh Yadav was d...
3 Priyanka detained on way to UP's Lakhimpur Khe... Congress leader Priyanka Gandhi Vadra was deta...
4 Rakesh Tikait reaches UP's Lakhimpur Kheri aft... BKU leader Rakesh Tikait reached UP's Lakhimpu...
5 Opposition to start with 'Photo Ops' in Lakhim... Uttar Pradesh Cabinet Minister Sidharth Nath S...
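Since the question also asked for a date column, the dictionary built inside the loop above could be extended accordingly. This is only a sketch: it assumes each news card still exposes an element with itemprop="date", exactly as the question's own code does.
for data in soup.select('div.news-card.z-depth-1'):
    if data.find(text=re.compile("farmer")):
        d.append({
            'headline': data.find(itemprop="headline").getText(),
            'article': data.find(itemprop="articleBody").getText(),
            'date': data.find(itemprop="date").getText()  # assumed present, mirroring the question's code
        })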
I am trying to write a script that retrieves the album title and band name from a music store newsletter. The band name and album title sit inside h3 and h4 elements. When executing the script I get a blank output in the CSV file.
from bs4 import BeautifulSoup
import requests
import pandas as pd
# Use the requests library to fetch the HTML content of the page
url = "https://www.musicmaniarecords.be/_sys/newsl_view?n=260&sub=Tmpw6Rij5D"
response = requests.get(url)
# Use the BeautifulSoup library to parse the HTML content
soup = BeautifulSoup(response.text, 'html.parser')
# Find all 'a' elements with the class 'row'
albums = soup.find_all('a', attrs={'class': 'row'})
# Iterate over the found elements and extract the album title and band name
album_title = []
band_name = []
for album in albums:
    album_title_element = album.find('td', attrs={'td_class': 'h3 class'})
    band_name_element = album.find('td', attrs={'td_class': 'h4 class'})
    album_title.append(album_title_element.text)
    band_name.append(band_name_element.text)
# Use the pandas library to save the extracted data to a CSV file
df = pd.DataFrame({'album_title': album_title, 'band_name': band_name})
df.to_csv('music_records.csv')
I think the error is in the attrs part, not sure how to fix it properly. Thanks in advance!
Looking at your code I agree that the error lies in the attrs part. The problem you are facing is that the site you are trying to scrape does not contain 'a' elements with the 'row' class. Thus find_all returns an empty list. There are plenty of 'div' elements with the 'row' class, maybe you meant to look for those?
You had the right idea by looking for 'td' elements and extracting their 'h3' and 'h4' elements, but since albums is an empty list, there are no elements to find.
I changed your code slightly to look for 'td' elements directly and extract their 'h3' and 'h4' elements. With these small changes your code found 29 albums.
from bs4 import BeautifulSoup
import requests
import pandas as pd
# Use the requests library to fetch the HTML content of the page
url = "https://www.musicmaniarecords.be/_sys/newsl_view?n=260&sub=Tmpw6Rij5D"
response = requests.get(url)
# Use the BeautifulSoup library to parse the HTML content
soup = BeautifulSoup(response.text, 'html.parser')
# Find all 'td' elements with the class 'block__cell'
albums = soup.find_all('td', attrs={'class': 'block__cell'})
# Iterate over the found elements and extract the album title and band name
album_title = []
band_name = []
for album in albums:
    album_title_element = album.find('h3')
    band_name_element = album.find('h4')
    album_title.append(album_title_element.text)
    band_name.append(band_name_element.text)
# Use the pandas library to save the extracted data to a CSV file
df = pd.DataFrame({'album_title': album_title, 'band_name': band_name})
df.to_csv('music_records.csv', index=False)
I also took the liberty of adding index=False to the last line of your code, so each row no longer starts with the DataFrame index followed by a comma.
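To illustrate the difference, here is a tiny made-up example (the album and band names are invented):
import pandas as pd

df = pd.DataFrame({'album_title': ['Abbey Road'], 'band_name': ['The Beatles']})
df.to_csv('with_index.csv')              # rows look like: 0,Abbey Road,The Beatles
df.to_csv('no_index.csv', index=False)   # rows look like: Abbey Road,The Beatles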
Hope this helps.
from bs4 import BeautifulSoup
import requests
import pandas as pd
# Use the requests library to fetch the HTML content of the page
url = "https://www.musicmaniarecords.be/_sys/newsl_view?n=260&sub=Tmpw6Rij5D"
response = requests.get(url)
# Use the BeautifulSoup library to parse the HTML content
soup = BeautifulSoup(response.text, 'html.parser')
# Find all 'td' elements with the class 'block__cell'
albums = soup.find_all('td', attrs={'class': 'block__cell'})
# Iterate over the found elements and extract the album title and band name
album_title = []
band_name = []
for album in albums:
    album_title_element = album.find('h3', attrs={'class': 'header'})
    band_name_element = album.find('h4', attrs={'class': 'header'})
    album_title.append(album_title_element.text)
    band_name.append(band_name_element.text)
# Use the pandas library to save the extracted data to a CSV file
df = pd.DataFrame({'album_title': album_title, 'band_name': band_name})
df.to_csv('music_records.csv')
Thanks to the anonymous hero for helping out!
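One thing to watch, which neither version handles: find returns None when a cell has no h3 or h4, and calling .text on None raises an AttributeError. A defensive variant of the loop (just a sketch, working on the same td elements as above) could skip such cells:
for album in albums:
    album_title_element = album.find('h3')
    band_name_element = album.find('h4')
    # Skip cells that don't contain both headers instead of crashing on None.
    if album_title_element is None or band_name_element is None:
        continue
    album_title.append(album_title_element.text)
    band_name.append(band_name_element.text)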
I need to loop through all the entries on all the pages from this link, then click the '查看' (view) link highlighted in red in the original screenshot (not reproduced here) to enter the detail page of each entry:
The objective is to crawl the info from detail pages like the one in the second screenshot (not reproduced here), saving the left-hand labels as column names and the right-hand values as rows:
The code I used:
import requests
import json
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
url = 'http://bjjs.zjw.beijing.gov.cn/eportal/ui?pageId=425000'
content = requests.get(url).text
soup = BeautifulSoup(content, 'lxml')
table = soup.find('table', {'class': 'gridview'})
df = pd.read_html(str(table))[0]
print(df.head(5))
Out:
序号 工程名称 ... 发证日期 详细信息
0 NaN 假日万恒社区卫生服务站装饰装修工程 ... 2020-07-07 查看
The code for entering the detailed pages:
url = 'http://bjjs.zjw.beijing.gov.cn/eportal/ui?pageId=308891&t=toDetail&GCBM=202006202001'
content = requests.get(url).text
soup = BeautifulSoup(content, 'lxml')
table = soup.find("table", attrs={"class":"detailview"}).findAll("tr")
for elements in table:
    inner_elements = elements.findAll("td", attrs={"class":"label"})
    for text_for_elements in inner_elements:
        print(text_for_elements.text)
Out:
工程名称:
施工许可证号:
所在区县:
建设单位:
工程规模(平方米):
发证日期:
建设地址:
施工单位:
监理单位:
设计单位:
行政相对人代码:
法定代表人姓名:
许可机关:
As you can see, I only get the column names; no entries have been successfully extracted.
In order to loop through all the pages, I think we need to use POST requests, but I don't know how to get the headers.
Thanks for your help in advance.
This script goes through all the pages, collects the data into a DataFrame and saves it to data.csv.
(!!! Warning !!! There are 2405 pages in total, so it takes a long time to get them all):
import requests
import pandas as pd
from pprint import pprint
from bs4 import BeautifulSoup
url = 'http://bjjs.zjw.beijing.gov.cn/eportal/ui?pageId=425000'
payload = {'currentPage': 1, 'pageSize':15}
def scrape_page(url):
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    return {td.get_text(strip=True).replace(':', ''): td.find_next('td').get_text(strip=True) for td in soup.select('td.label')}
all_data = []
current_page = 1
while True:
    print('Page {}...'.format(current_page))
    payload['currentPage'] = current_page
    soup = BeautifulSoup(requests.post(url, data=payload).content, 'html.parser')

    for a in soup.select('a:contains("查看")'):
        u = 'http://bjjs.zjw.beijing.gov.cn' + a['href']
        d = scrape_page(u)
        all_data.append(d)
        pprint(d)

    page_next = soup.select_one('a:contains("下一页")[onclick]')
    if not page_next:
        break

    current_page += 1
df = pd.DataFrame(all_data)
df.to_csv('data.csv')
Prints the data to screen and saves data.csv (the original answer included a LibreOffice screenshot of the file).
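Not part of the original answer: if you just want to see how the pagination POST behaves on its own, here is a minimal sketch that reuses the same url, payload and '查看' selector as the script above and only counts the detail links on one page:
import requests
from bs4 import BeautifulSoup

url = 'http://bjjs.zjw.beijing.gov.cn/eportal/ui?pageId=425000'
payload = {'currentPage': 2, 'pageSize': 15}  # ask the server for page 2 of the listing
soup = BeautifulSoup(requests.post(url, data=payload).content, 'html.parser')

# Each "查看" link points to a detail page relative to the site root.
links = ['http://bjjs.zjw.beijing.gov.cn' + a['href'] for a in soup.select('a:contains("查看")')]
print(len(links), 'detail links on this page')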
Very new to webscraping and so I appreciate all the help. I'm trying to build a model that pulls in values from an NHL reference table found
here:
https://www.hockey-reference.com/leagues/NHL_2019.html#
I'm only trying to pull values pertaining to the 'Team Statistics' table which contains the aggregate team data. I'm making some progress but I am stuck when trying to pull the row data for each team and store it for future calculations. Here is my code so far:
from urllib.request import urlopen
import pandas as pd
from bs4 import BeautifulSoup
url = "https://www.hockey-reference.com/leagues/NHL_2019.html"
html = urlopen(url)
soup = BeautifulSoup(html, "lxml")
all_stats = soup.find('div', {'id': 'all_stats'})
print(all_stats)
With this code I'm able to see the row information I need in HTML form but any attempt to pull that data results in finding None. I imagine I have to assign each team and td value a variable so I can call on it in the future. There are 30 rows of data that I need to collect.
Thanks for the help,
George
The reason is that the Team Statistics table is inside an HTML comment, so it is not parsed as part of the regular markup.
In this case you can use Comment from bs4, like this:
from bs4 import BeautifulSoup, Comment
from urllib.request import urlopen

search_url = 'https://www.hockey-reference.com/leagues/NHL_2019.html#'
page = urlopen(search_url)
soup = BeautifulSoup(page, "html.parser")
table = soup.findAll('table')  # html part with no comment
table_with_comment = soup.findAll(text=lambda text: isinstance(text, Comment))
[comment.extract() for comment in table_with_comment]
# print(table_with_comment) prints all comment lines
for c in table_with_comment:
    a = BeautifulSoup(c, "html.parser")
    teams = a.findAll('td', attrs={'class': 'left'})   # Team
    values = a.findAll('td', attrs={'class': 'right'}) # stats
    for getvalues in values:
        print(getvalues.text)
    for gettextinElement in teams:
        print(gettextinElement.text)
OUTPUT :
For Stats:
27.1
62
47
11
4
98
.790
239
162
5
1
1.26
-0.05
6.47
172
131
61 ..UP TO END
For Teams :
Tampa Bay Lightning
Calgary Flames
Boston Bruins
San Jose Sharks
New York Islanders
Toronto Maple Leafs
Winnipeg Jets
Nashville Predators
Washington Capitals
Columbus Blue Jackets .. UP TO END
A variation on @Omer Tekbiyik's answer that will also get the data into a dataframe:
from bs4 import BeautifulSoup as bs4, Comment
import requests
import pandas as pd
url = "https://www.hockey-reference.com/leagues/NHL_2019.html#"
res= requests.get(url)
soup = bs4(res.content, 'lxml')
table = soup.findAll('table')
table_with_comment = soup.findAll(text=lambda text:isinstance(text, Comment))
my_table = pd.read_html(table_with_comment[16])
my_table
The output is a dataframe with the Team Statistics table; from here you can run on it anything you can run on any pandas dataframe.
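For example (a sketch; it assumes the comment at index 16 contains exactly one table, as in the run above), since pd.read_html returns a list of DataFrames:
stats = my_table[0]                           # take the first (and only) parsed table
print(stats.head())                           # peek at the first rows of Team Statistics
stats.to_csv('team_stats.csv', index=False)   # or persist it for later calculations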
I was able to successfully scrape some text from a website and I'm now trying to load the text into a list so I can later convert it to a Pandas DataFrame.
The site supplies the data in an scsv (semicolon-separated values) format, so it was quick to grab.
The following is my code:
import requests
from bs4 import BeautifulSoup
#Specify the url:url
url = "http://rotoguru1.com/cgi-bin/fyday.pl?week=1&year=2017&game=dk&scsv=1"
# Packages the request, send the request and catch the response: r
r = requests.get(url)
#Extract the response:html_doc
html_doc = r.text
soup = BeautifulSoup(html_doc,"html.parser")
#Find the tags associated with the data you need, in this case
# it's the "pre" tags
for data in soup.find_all("pre"):
print(data.text)
Sample Output
Week;Year;GID;Name;Pos;Team;h/a;Oppt;DK points;DK salary
1;2017;1254;Smith, Alex;QB;kan;a;nwe;34.02;5400
1;2017;1344;Bradford, Sam;QB;min;h;nor;28.54;5900
Use the open function to write a CSV file:
import requests
from bs4 import BeautifulSoup
url = "http://rotoguru1.com/cgi-bin/fyday.pl?week=1&year=2017&game=dk&scsv=1"
r = requests.get(url)
html_doc = r.content
soup = BeautifulSoup(html_doc,"html.parser")
file = open("data.csv", "w")
for data in soup.find("pre").text.split('\n'):
    file.write(data.replace(';', ',') + '\n')  # one comma-separated row per line
file.close()
Here's one thing you can do, although it's possible that someone who knows pandas better than I can suggest something better.
You have r.text. Put the relevant part of it into a convenient text file; let me call it temp.csv. Now you can use pandas' read_csv method to get the data into a dataframe.
>>> df = pandas.read_csv('temp.csv', sep=';')
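A minimal sketch of that round trip, using the soup object from the question and writing only the <pre> block (r.text itself would include the surrounding HTML); the file name temp.csv is just a placeholder:
import pandas

# Write the semicolon-separated block from the page's <pre> tag to a temporary
# file, then let pandas parse it.
with open('temp.csv', 'w') as f:
    f.write(soup.find('pre').text)

df = pandas.read_csv('temp.csv', sep=';')
print(df.head())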
Addendum:
Suppose results were like this.
>>> results = [['a', 'b', 'c'], [1,2,3], [4,5,6]]
Then you could put them in a dataframe in this way.
>>> df = pandas.DataFrame(results[1:], columns=results[0])
>>> df
a b c
0 1 2 3
1 4 5 6
If you want to convert your existing code's output into a list, the split method will do the job, and then you can use pandas to convert it into a dataframe.
import requests
from bs4 import BeautifulSoup
#Specify the url:url
url = "http://rotoguru1.com/cgi-bin/fyday.pl?week=1&year=2017&game=dk&scsv=1"
# Packages the request, send the request and catch the response: r
r = requests.get(url)
#Extract the response:html_doc
html_doc = r.text
soup = BeautifulSoup(html_doc,"html.parser")
#Find the tags associated with the data you need, in this case
# it's the "pre" tags
for data in soup.find_all("pre"):
print(data.text.split(";"))
I'm performing the same web scraping pattern that I just learned from a previous post; however, I'm unable to scrape the data using the script below. I keep getting an empty return, and I know the tags are there. I want to find_all "mubox" and then pull the values for O/U and goalie information. This is so weird, what am I missing?
from bs4 import BeautifulSoup
import requests
import pandas as pd
page_link = 'https://www.thespread.com/nhl-scores-matchups'
page_response = requests.get(page_link, timeout=10)
# here, we fetch the content from the url, using the requests library
page_content = BeautifulSoup(page_response.content, "html.parser")
# Take out the <div> of name and get its value
tables = page_content.find_all("div", class_="mubox")
print (tables)
# Iterate through rows
rows = []
This site uses an internal API before rendering the data. That API returns an XML file (the URL used in the script below) which contains all the match information. You can parse it using BeautifulSoup:
from bs4 import BeautifulSoup
import requests
page_link = 'https://www.thespread.com/matchups/NHL/matchup-list_20181030.xml'
page_response = requests.get(page_link, timeout=10)
body = BeautifulSoup(page_response.content, "lxml")
data = [
    (
        t.find("road").text,
        t.find("roadgoalie").text,
        t.find("home").text,
        t.find("homegoalie").text,
        float(t.find("ot").text),
        float(t.find("otmoney").text),
        float(t.find("ft").text),
        float(t.find("ftmoney").text)
    )
    for t in body.find_all('event')
]
print(data)
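If you would rather have a DataFrame again, the list of tuples can be handed to pandas with explicit column names (the names below are my own labels mirroring the XML tags):
import pandas as pd

columns = ['road', 'road_goalie', 'home', 'home_goalie', 'ot', 'ot_money', 'ft', 'ft_money']
df = pd.DataFrame(data, columns=columns)
print(df.head())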