Here's my script:
import warnings
warnings.filterwarnings("ignore")
import re
import json
import requests
from requests import get
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
URLs = ['https://www.frayssinet-joaillier.fr/fr/p/montre-the-longines-legend-diver-l37744302-bdc2']
TypeVendor = []
NameVendor = []
Marques = []
Brands = []
Refs = []
Prices = []
#Carts = []
#Links = []
Links = []
#df = pd.read_csv('testlink4.csv')
n=1
for url in URLs:
    results = requests.get(url)
    soup = BeautifulSoup(results.text, "html.parser")
    TypeVendor.append('Distributeur')
    NameVendor.append('Frayssinet')
    Marques.append('Longines')
    Brands.append(soup.find('span', class_='main-detail__name').text)
    Refs.append(soup.find('span', class_='main-detail__ref').text)
    Prices.append(soup.find('span', class_='prix').text)
    Links.append(url)
I understand why it doesn't work: .text isn't suited to dynamic content. But I cannot figure out how to scrape this kind of content. I know that if you find where the JSON data is stored, you can work with it and scrape the data.
But I checked the Network tab in the Google developer tools and didn't find anything.
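One thing worth checking before anything else: many product pages embed their data as JSON-LD in a script tag, which you can parse directly. This is only a sketch under that assumption; the URL comes from the question, but whether this particular site ships a JSON-LD tag is not confirmed:
import json
import requests
from bs4 import BeautifulSoup

url = 'https://www.frayssinet-joaillier.fr/fr/p/montre-the-longines-legend-diver-l37744302-bdc2'
headers = {'User-Agent': 'Mozilla/5.0'}
soup = BeautifulSoup(requests.get(url, headers=headers).text, 'html.parser')
# look for schema.org data embedded as JSON-LD (assumption: the page has such a tag)
for script in soup.find_all('script', type='application/ld+json'):
    try:
        blob = json.loads(script.string)
    except (TypeError, ValueError):
        continue
    print(json.dumps(blob, indent=2, ensure_ascii=False))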
Set headers on your request and store your information in a more structured way.
Example:
import requests
from bs4 import BeautifulSoup
import pandas as pd
headers = {'User-Agent': 'Mozilla/5.0'}
URLs = ['https://www.frayssinet-joaillier.fr/fr/p/montre-the-longines-legend-diver-l37744302-bdc2']
data = []
for url in URLs:
    results = requests.get(url, headers=headers)
    soup = BeautifulSoup(results.text, "html.parser")
    data.append({
        'name': soup.find('span', class_='main-detail__name').get_text(strip=True),
        'brand': soup.find('span', class_='main-detail__marque').get_text(strip=True),
        'ref': soup.find('span', class_='main-detail__ref').get_text(strip=True),
        'price': soup.find('span', {'itemprop': 'price'}).get('content'),
        'url': url
    })
pd.DataFrame(data)
Output (a single-row dataframe, shown field by field):
name:  Montre The Longines Legend Diver L3.774.4.30.2
brand: Longines
ref:   Référence : L3.774.4.30.2
price: 2240
url:   https://www.frayssinet-joaillier.fr/fr/p/montre-the-longines-legend-diver-l37744302-bdc2
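If you want the bare reference code without the label, a small cleanup step can strip it (a sketch; the prefix text is copied from the output above):
import re
ref = 'Référence : L3.774.4.30.2'
print(re.sub(r'^Référence\s*:\s*', '', ref))  # L3.774.4.30.2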
I am trying to extract stock market data from the web browser. I am able to open the browser and extract the data for one stock.
Below is the Python code for one stock, which opens the browser with Selenium WebDriver and extracts the data from the page using BeautifulSoup.
This is very basic code which needs simplifying so it can extract the data for a list of stocks like the one below:
stock_list = ['Infosys', 'Reliance industries', 'wipro']
I am not sure how to extract the data for multiple items in the list as mentioned above, or how to simplify the code accordingly.
Python code to extract the data for one stock:
import requests
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.support import expected_conditions as EC
headers = {'User-Agent': 'Mozilla/5.0'}
browser = webdriver.Firefox()
browser.get("https://www.tickertape.in/stocks/")
browser.maximize_window()
inputElement=browser.find_element_by_id('search-stock-input')
inputElement.click()
inputElement.send_keys('Infosys')
inputElement.click()
inputElement = wait(browser, 5).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#search-stock-input")))
inputElement.click()
inputElement.send_keys(Keys.RETURN)
page = requests.get(browser.current_url,headers=headers)
from bs4 import BeautifulSoup
soup = BeautifulSoup(page.text, 'html.parser')
ScriptName = []
ScriptName_elem = soup.find_all(class_='jsx-2256451 security-name')
for item in ScriptName_elem:
    ScriptName.append(item.text)
intrinsic_value = []
intrinsic_value_elem = soup.find_all(class_='jsx-3277407410 jsx-1058798148 lh-138 text-13 commentary-desc')
for item in intrinsic_value_elem:
    intrinsic_value.append(item.text)
Returns_vs_FD_rates = []
Returns_vs_FD_rates_elem = soup.find_all(class_='jsx-3947392323 jsx-1058798148 lh-138 text-13 commentary-desc')
for item in Returns_vs_FD_rates_elem:
    Returns_vs_FD_rates.append(item.text)
Dividend_Returns = []
Dividend_Returns_elem = soup.find_all(class_='jsx-566496888 jsx-1058798148 lh-138 text-13 commentary-desc')
for item in Dividend_Returns_elem:
    Dividend_Returns.append(item.text)
Entry_Point = []
Entry_Point_elem = soup.find_all(class_='jsx-3697483086 jsx-1058798148 lh-138 text-13 commentary-desc')
for item in Entry_Point_elem:
    Entry_Point.append(item.text)
Red_Flag_Indicator = []
Red_Flag_Indicator_elem = soup.find_all(class_='jsx-1920835126 jsx-1058798148 relative no-select tooltip-holder')
for item in Red_Flag_Indicator_elem:
    Red_Flag_Indicator.append(item.text)
Red_Flag_Indicator_Reason = []
Red_Flag_Indicator_Reason_elem = soup.find_all(class_='jsx-1920835126 jsx-1058798148 lh-138 text-13 commentary-desc')
for item in Red_Flag_Indicator_Reason_elem:
    Red_Flag_Indicator_Reason.append(item.text)
df_array = []
for (ScriptName_n, intrinsic_value_n, Returns_vs_FD_rates_n, Dividend_Returns_n,
     Entry_Point_n, Red_Flag_Indicator_n, Red_Flag_Indicator_Reason_n) in zip(
        ScriptName, intrinsic_value, Returns_vs_FD_rates, Dividend_Returns,
        Entry_Point, Red_Flag_Indicator, Red_Flag_Indicator_Reason):
    df_array.append({'ScriptName': ScriptName_n, 'intrinsic_value': intrinsic_value_n,
                     'Returns_vs_FD_rates': Returns_vs_FD_rates_n, 'Dividend_Returns': Dividend_Returns_n,
                     'Entry_Point': Entry_Point_n, 'Red_Flag_Indicator': Red_Flag_Indicator_n,
                     'Red_Flag_Indicator_Reason': Red_Flag_Indicator_Reason_n})
df = pd.DataFrame(df_array)
df
Thanks in advance
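As a general pattern, the usual way to go from "code for one stock" to "code for a list" is to wrap the single-stock steps in a function that returns one dict, then loop over the list. A bare skeleton of that shape, with the scraping body left as a stub (the answer below takes a different, API-based route):
import pandas as pd

def scrape_one(stock_name):
    # stand-in for the single-stock Selenium/BeautifulSoup steps above;
    # the real version should return one row of results as a dict
    return {'ScriptName': stock_name}

stock_list = ['Infosys', 'Reliance industries', 'wipro']
df = pd.DataFrame([scrape_one(s) for s in stock_list])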
You can call the same APIs the page does. The first API gets the id and security name for the stock; you then use those with the second API, which returns the checklist items.
If you create a list of dictionaries, one dictionary per ticker, you can convert it to a dataframe at the end. If I have missed an item, let me know. I also chose to store a lot of the other data (e.g. low, high) in another dictionary called other_data.
import requests
import pandas as pd
other_data = {}
results = []
stock_list = ['Infosys', 'Reliance industries', 'wipro']
with requests.Session() as s:
    for ticker in stock_list:
        try:
            r = s.get(
                f'https://api.tickertape.in/search?text={ticker.lower()}&types=stock,brands,index,etf,mutualfund').json()
            stock_id = r['data']['stocks'][0]['sid']
            name = r['data']['stocks'][0]['name']
            other_data[stock_id] = r
            r = s.get(
                f'https://api.tickertape.in/stocks/investmentChecklists/{stock_id}?type=basic').json()
            d = {i['title']: i['description'] for i in r['data']}
            d = {**{'Security': name}, **other_data[stock_id]['data']['stocks'][0]['quote'],
                 **{'marketCap': other_data[stock_id]['data']['stocks'][0]['marketCap']}, **d}
            results.append(d)
        except Exception as e:
            print(ticker, e)
df = pd.DataFrame(results)
df
I hope you guys are always healthy.
I want to scrape a more specific table using BS4. This is my code:
from bs4 import BeautifulSoup
import requests
url = 'test.com'
page = requests.get(url)
soup = BeautifulSoup(page.text, 'lxml')
for row in soup.select('tbody tr'):
    row_text = [x.text for x in row.find_all('td')]
    print(row_text)
How do you get results like this:
Number, Name, address, telp, komoditi
1, "ABON JUARA" JUARA FOOD INDUSTRY, Jl. Jend Sudirman 339, Salatiga, Jawa Tengah, 0298-324060, Abon Sapi Dan Ayam
and save them to a CSV?
import requests
from bs4 import BeautifulSoup
import csv
def main(url):
    r = requests.get(url)
    soup = BeautifulSoup(r.content, 'html.parser')
    target = soup.select_one("table#newspaper-a").select("tr[valign=top]")
    with open("data.csv", 'w', newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["No", "Name", "Address", "Tel", "Komoditi"])
        for item in target:
            item = list(item.stripped_strings)
            item[3] = item[3][6:]  # drop the leading label (first 6 characters) from the phone cell
            writer.writerow(item)

main("https://kemenperin.go.id/direktori-perusahaan?what=&prov=&hal=1")
I am new to scraping. I want to scrape the Premier League season 2018-19 results (fixtures, results, dates), but I am struggling to navigate the web site. All I get is an empty list / [None]. If you have a solution you can share, that would be a great help.
Here's what I tried:
import pandas as pd
import requests as uReq
from bs4 import BeautifulSoup
url = uReq.get('https://www.flashscore.com/football/england/premier-league-2018-2019/results/')
soup = BeautifulSoup(url.text, 'html.parser')
divs = soup.find_all('div', attrs={'id': 'live-table'})
Home = []
for div in divs:
    anchor = div.find(class_='event__participant event__participant--home')
    Home.append(anchor)
print(Home)
The site loads the results with JavaScript, so a plain request won't see them. You will have to install requests_html (pip install requests-html) for my solution.
Here is how I would go about it:
from requests_html import AsyncHTMLSession
from collections import defaultdict
import pandas as pd
url = 'https://www.flashscore.com/football/england/premier-league-2018-2019/results/'
asession = AsyncHTMLSession()
async def get_scores():
    r = await asession.get(url)
    await r.html.arender()
    return r
results = asession.run(get_scores)
results = results[0]
times = results.html.find("div.event__time")
home_teams = results.html.find("div.event__participant.event__participant--home")
scores = results.html.find("div.event__scores.fontBold")
away_teams = results.html.find("div.event__participant.event__participant--away")
event_part = results.html.find("div.event__part")
dict_res = defaultdict(list)
for ind in range(len(times)):
    dict_res['times'].append(times[ind].text)
    dict_res['home_teams'].append(home_teams[ind].text)
    dict_res['scores'].append(scores[ind].text)
    dict_res['away_teams'].append(away_teams[ind].text)
    dict_res['event_part'].append(event_part[ind].text)
df_res = pd.DataFrame(dict_res)
This generates a dataframe with times, home_teams, scores, away_teams and event_part columns.
I need to scrape the job descriptions on the page () for every job title, like section (accounting), job title (staff accountant) and the job description text inside the title, in different columns of a CSV file, using Python's BeautifulSoup module.
I'm new to BeautifulSoup; I tried some ways of doing it but it's not working. Can you please help with the code?
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
start = time.time()
url = ""
data = []
while True:
    resp = requests.get(url)
    soup = BeautifulSoup(resp.content, 'lxml')
    jobdesc = soup.find("li", {'class': 'col-xs-12 col-sm-4'})
    section = soup.find("h4")
    jd = {"jobdescription": jobdesc.text, "topic": section.text}
    data.append(jd)

df = pd.DataFrame(data)
df.to_csv("JD.csv")
Here is one way, leveraging :has in bs4 4.7.1+ to isolate the sections for looping over. zip_longest is used so we can join the section title on to each job.
import requests, csv
from bs4 import BeautifulSoup as bs
from itertools import zip_longest
r = requests.get('https://resources.workable.com/job-descriptions/#', headers = {'User-Agent':'Mozilla/5.0'})
soup = bs(r.content, 'lxml')
with open("data.csv", "w", encoding="utf-8-sig", newline='') as csv_file:
w = csv.writer(csv_file, delimiter = ",", quoting=csv.QUOTE_MINIMAL)
w.writerow(['Section','Job Title'])
for section in soup.select('section:has(.job)'):
title = section.select_one('a').text.strip()
jobs = [job.text for job in section.select('li a')]
rows = list(zip_longest([title], jobs, fillvalue = title))
for row in rows:
w.writerow(row)
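To see why zip_longest with fillvalue works here: the one-element [title] list is padded out to the length of jobs, so every job row carries the section title. A quick illustration with dummy values:
from itertools import zip_longest

# dummy values, just to illustrate the pairing
title = 'Accounting'
jobs = ['Staff Accountant', 'Auditor', 'Bookkeeper']
print(list(zip_longest([title], jobs, fillvalue=title)))
# [('Accounting', 'Staff Accountant'), ('Accounting', 'Auditor'), ('Accounting', 'Bookkeeper')]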
I had a 403 Forbidden using the requests package, so I decided to use Selenium.
You can try this:
from bs4 import BeautifulSoup
import requests
import pandas as pd
import time
from selenium import webdriver
url = "https://resources.workable.com/job-descriptions/#"
data = []
#resp = requests.get(url)
#soup = BeautifulSoup(resp.text, 'html.parser')
driver = webdriver.Firefox()
driver.get(url)
soup = BeautifulSoup(driver.page_source, 'html.parser')
section = soup.find_all('section',{'class':'box-white'})
for s in section:
    title = s.find('h4').text
    lis = s.find_all("li", {'class': 'col-xs-12 col-sm-4'})
    for li in lis:
        jd = {"jobdescription": li.text, "topic": title}
        data.append(jd)

df = pd.DataFrame(data)
df.to_csv("JD.csv")
EDIT: To get the description for all jobs:
from bs4 import BeautifulSoup
import requests
import pandas as pd
import time
from selenium import webdriver
url = "https://resources.workable.com/job-descriptions/#"
data = []
#resp = requests.get(url)
#soup = BeautifulSoup(resp.text, 'html.parser')
driver = webdriver.Firefox()
driver.get(url)
soup = BeautifulSoup(driver.page_source, 'html.parser')
section = soup.find_all('section',{'class':'box-white'})
for s in section:
    title = s.find('h4').text
    lis = s.find_all("li", {'class': 'col-xs-12 col-sm-4'})
    for li in lis:
        job = li.text
        driver.get(li.find('a').get('href'))
        soup2 = BeautifulSoup(driver.page_source, 'html.parser')
        jd = {"job": job, "topic": title,
              "description": soup2.find('div', {'class': 'entry-content article-content'}).text}
        data.append(jd)

df = pd.DataFrame(data)
df.to_csv("JD.csv")
Scraping data from Monster jobs and uploading it to MongoDB.
from time import *
from selenium import webdriver
import pymongo
from pymongo.results import InsertManyResult
import os
client = pymongo.MongoClient()
mydb = client['jobs']
collection = mydb['med_title']
driver = webdriver.Chrome("C:/Users/91798/Desktop/pythn_files/chromedriver.exe")
driver.get("https://www.monsterindia.com/")
driver.implicitly_wait(9)
driver.find_element_by_id("SE_home_autocomplete").send_keys("nursing , Therapist , docter , medical ,nurse , hospital")
#for normal search use this
driver.find_element_by_xpath("//body/div[#id='themeDefault']/section[1]/div[1]/div[1]/div[1]/div[2]/div[1]/div[1]/div[1]/div[2]/form[1]/div[1]/div[2]/input[1]").click()
driver.implicitly_wait(20)
temp = 1
while True:
    if temp == 5:
        break
    all_jobs = driver.find_elements_by_class_name("card-apply-content")
    link_list = []
    for job in all_jobs:
        try:
            company = ""
            com_name = job.find_elements_by_class_name("job-tittle")
            driver.implicitly_wait(1)
            for ele in com_name:
                company = ele.find_element_by_class_name('company-name').text
            job_title = ""
            for ele in com_name:
                job_title = ele.find_element_by_class_name('medium').text
            location = job.find_element_by_class_name("loc").text
            driver.implicitly_wait(1)
            lnks = job.find_elements_by_tag_name("a")
            for lnk in lnks:
                link_list.append(lnk.get_attribute('href'))
                break
            driver.implicitly_wait(1)
            desc = job.find_element_by_class_name("job-descrip").text
            driver.implicitly_wait(1)
            skills = job.find_element_by_class_name("descrip-skills").text
        except:
            desc = 'desc Not Specified'
            skills = 'skills Not Specified'
            location = 'location Not Specified'
            company = 'company Not Specified'
            job_title = 'job_title not specified'
        s = skills.split(' ')
        for i in s:
            if i == ',':
                s.remove(',')
        data = {"job_title": job_title, "company_name": company, "job_location": location,
                "job_desc": desc, "skills": s[2::], "card_link": link_list[0]}
        link_list.clear()
        y = collection.insert_one(data)
        print(y.inserted_id)
    driver.find_element_by_xpath("//button[contains(text(),'Next')]").click()
    sleep(25)
    temp = temp + 1
I have been using BeautifulSoup to scrape the pricing information from
"https://www.huaweicloud.com/pricing.html#/ecs"
I want to extract the table information of that website, but I get nothing.
I am using Windows 10, the latest BeautifulSoup and Requests, and Python 3.7.
import requests
from bs4 import BeautifulSoup
url = 'https://www.huaweicloud.com/pricing.html#/ecs'
headers = {'User-Agent':'Mozilla/5.0'}
response = requests.get(url,headers=headers)
soup = BeautifulSoup(response.content,'html.parser')
soup.find_all('table')
After running soup.find_all('table'), it returns an empty list: []
I know this is not the answer to your question, but this might help you: the pricing tables are rendered by JavaScript, so they never appear in the HTML that requests receives. This is the code I came up with using Selenium and BeautifulSoup. You just have to specify the location of chromedriver, and the script is good to go.
from selenium import webdriver
import time
from bs4 import BeautifulSoup
import pandas as pd
url = 'https://www.huaweicloud.com/pricing.html#/ecs'
driver = webdriver.Chrome("location of chrome driver")
driver.get(str(url))
driver.find_element_by_id("calculator_tab0").click()
time.sleep(3)
html_source = driver.page_source
soup = BeautifulSoup(html_source, features="lxml")
table_all = soup.findAll("table")
output_rows = []
for table in table_all[:2]:
    for table_row in table.findAll('tr'):
        thead = table_row.findAll('th')
        columns = table_row.findAll('td')
        _thead = []
        for th in thead:
            _thead.append(th.text)
        output_rows.append(_thead)
        _row = []
        for column in columns:
            _row.append(column.text)
        output_rows.append(_row)
output_rows = [x for x in output_rows if x != []]
df = pd.DataFrame(output_rows)
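If you want to keep the result, you can write the dataframe out in the usual way (the file name here is just an example):
df.to_csv("huawei_ecs_pricing.csv", index=False)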