Collecting all links in the main page and sub-pages - python-3.x

I am trying to make a script that collects all links in the main page and its sub-pages.
For example, I need to collect example.com, example.com/link1, example.com/link1/sub-link1, and so on.
Please check my code:
import csv
from selenium import webdriver
from selenium.webdriver.common.by import By

# Instantiate webdriver
driver = webdriver.Chrome()
# Open Chrome maximized
driver.maximize_window()
# Open a link
driver.get("https://www.calculator.net/")
links = driver.find_elements(By.TAG_NAME, 'a')

header = ['URL', 'Name']
with open('/home/user/calculator.csv', 'w', encoding='UTF8') as f:
    writer = csv.writer(f)
    # write the header
    writer.writerow(header)
    for link in links:
        href = link.get_attribute("href")
        data = [href, link.text]
        # write the data
        writer.writerow(data)
Please note that I tried adding driver.get(href) inside the for loop, but that did not work.

Try the BeautifulSoup module, and let the driver sleep a little before reading the page source.
import time
from bs4 import BeautifulSoup
...
driver.get("https://www.calculator.net/")
time.sleep(5)
html = driver.page_source
driver.close()

bs = BeautifulSoup(html, 'html.parser')
links = bs.find_all('a')
for link in links:
    print(link.attrs['href'])
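Note that this only lists the links on the main page, while the question also asks for links on the sub-pages. Calling driver.get() while iterating over the elements found on the main page makes those elements stale, which is probably why adding it inside the for loop did not work; collect the hrefs first, then visit them in a second pass. A minimal sketch of that two-pass approach (keeping only same-domain links, one level deep):
from urllib.parse import urlparse
from selenium import webdriver
from selenium.webdriver.common.by import By

start_url = "https://www.calculator.net/"
domain = urlparse(start_url).netloc

driver = webdriver.Chrome()
driver.get(start_url)
# collect the hrefs first, so no stale elements are held while navigating
main_links = [a.get_attribute("href") for a in driver.find_elements(By.TAG_NAME, 'a')]
main_links = [h for h in main_links if h and urlparse(h).netloc == domain]

all_links = set(main_links)
for href in main_links:
    driver.get(href)
    for a in driver.find_elements(By.TAG_NAME, 'a'):
        sub_href = a.get_attribute("href")
        if sub_href and urlparse(sub_href).netloc == domain:
            all_links.add(sub_href)

driver.quit()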

Related

How to get post links from the whole page using BeautifulSoup Selenium

I'm having trouble web scraping with BeautifulSoup and Selenium. I want to pull data from pages 1-20, but somehow only the data up to page 10 gets pulled. The last page may even be beyond 20, yet my code can only pull 10 pages. Does anyone see what the problem is, so that I can pull all the data without a page limit?
options = webdriver.ChromeOptions()
options.add_argument('-headless')
options.add_argument('-no-sandbox')
options.add_argument('-disable-dev-shm-usage')
driver = webdriver.Chrome('chromedriver', options=options)

apartment_urls = []
try:
    for page in range(1, 20):
        print(f"Extraction Page# {page}")
        page = "https://www.99.co/id/sewa/apartemen/jakarta?kamar_tidur_min=1&kamar_tidur_maks=4&kamar_mandi_min=1&kamar_mandi_maks=4&tipe_sewa=bulanan&hlmn=" + str(page)
        driver.get(page)
        time.sleep(5)

        soup = BeautifulSoup(driver.page_source, 'html.parser')
        apart_info_list = soup.select('h2.search-card-redesign__address a[href]')
        for link in apart_info_list:
            get_url = '{0}{1}'.format('https://www.99.co', link['href'])
            print(get_url)
            apartment_urls.append(get_url)
except:
    print("Good Bye!")
This is the output of the code. From page 10, 11, 12 and so on I can't get the data.
With the following approach, pagination works fine without a page limit.
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager

driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
driver.get('https://www.99.co/id/sewa/apartemen/jakarta?kamar_tidur_min=1&kamar_tidur_maks=4&kamar_mandi_min=1&kamar_mandi_maks=4&tipe_sewa=bulanan')
time.sleep(5)
driver.maximize_window()

while True:
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    apart_info_list = soup.select('h2.search-card-redesign__address a')
    for link in apart_info_list:
        get_url = '{0}{1}'.format('https://www.99.co', link['href'])
        print(get_url)
    # find_element() raises NoSuchElementException when there is no match,
    # so use find_elements() and stop once the "next" link disappears
    next_button = driver.find_elements(By.CSS_SELECTOR, 'li.next > a')
    if next_button:
        next_button[0].click()
        time.sleep(3)
    else:
        break
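If the fixed time.sleep() calls turn out to be flaky, an explicit wait is usually more reliable. A minimal sketch using Selenium's WebDriverWait together with the listing selector used above:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# wait up to 10 seconds for at least one listing link to appear on the new page
WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, 'h2.search-card-redesign__address a'))
)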
The snippet above uses webdriver-manager, if you would prefer that to pointing at a local chromedriver.
Alternative solution: as the next page URL isn't dynamic, it can be read straight from the "next" link's href. This also works fine.
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager

driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
driver.get('https://www.99.co/id/sewa/apartemen/jakarta?kamar_tidur_min=1&kamar_tidur_maks=4&kamar_mandi_min=1&kamar_mandi_maks=4&tipe_sewa=bulanan')
time.sleep(5)
driver.maximize_window()

while True:
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    apart_info_list = soup.select('h2.search-card-redesign__address a')
    for link in apart_info_list:
        get_url = '{0}{1}'.format('https://www.99.co', link['href'])
        print(get_url)
    # read the relative href of the "next" link and load that page directly
    next_page = soup.select_one('li.next > a')
    if next_page:
        driver.get(f"https://www.99.co{next_page['href']}")
        time.sleep(3)
    else:
        break
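Either way, if the same listing shows up on more than one results page, it may be worth deduplicating the collected URLs while keeping their order; assuming the links are appended to apartment_urls as in the question:
# drop duplicate URLs while preserving the original order
apartment_urls = list(dict.fromkeys(apartment_urls))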

Scraping information from a div tag inside of a div tag

I have been trying to scrape this website: https://octane.gg/events/e83e-rlcs-x-championship-europe/stats/players
I want to get specific ratings, but I get nothing when I execute this code:
from bs4 import BeautifulSoup
import requests
result = requests.get("https://octane.gg/events/e83e-rlcs-x-championship-europe/stats/players")
src = result.content
soup = BeautifulSoup(src, 'lxml')
match = soup.find('div', class_='css-gm45eu')
print(match)
output: None
How can I scrape what is in that class?
Try to use Selenium:
from selenium import webdriver

driver = webdriver.Chrome('PATH_TO --> chromedriver.exe')
driver.get("https://octane.gg/events/e83e-rlcs-x-championship-europe/stats/players")

ratings = driver.find_elements_by_xpath('//div[@class="css-gm45eu"]')
ratings_list = []
for p in range(len(ratings)):
    ratings_list.append(ratings[p].text)
print(ratings_list)
Output:
['1.189', '1.109', '1.098', '1.031', '1.028', '1.005', '0.990', '0.981', '0.967', '0.936', '0.904', '0.846', '0.841', '0.840', '0.836', '0.809', '0.759', '0.726']
Download chromedriver.exe:
https://chromedriver.chromium.org/downloads
If you don't want a Chrome window to open while the script runs, use this code:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_argument('--headless')
driver = webdriver.Chrome('PATH_TO --> chromedriver.exe', options=options)
driver.get("https://octane.gg/events/e83e-rlcs-x-championship-europe/stats/players")

ratings = driver.find_elements_by_xpath('//div[@class="css-gm45eu"]')
ratings_list = []
for p in range(len(ratings)):
    ratings_list.append(ratings[p].text)
print(ratings_list)
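Note that find_elements_by_xpath was removed in Selenium 4. If you are on a recent Selenium, the equivalent uses the By API; a minimal sketch with the same XPath:
from selenium.webdriver.common.by import By

ratings = driver.find_elements(By.XPATH, '//div[@class="css-gm45eu"]')
ratings_list = [r.text for r in ratings]
print(ratings_list)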

How to extract name and links from a given website - python

For the website mentioned below, I am trying to find each name and its corresponding link, but I'm not able to get the data at all.
Using BeautifulSoup
from bs4 import BeautifulSoup
import requests

source = requests.get('https://mommypoppins.com/events/115/los-angeles/all/tag/all/age/all/all/deals/0/near/0/0')
soup = BeautifulSoup(source.text, 'html.parser')
mains = soup.find_all("div", {"class": "list-container-wrapper"})

name = []
lnks = []
for main in mains:
    name.append(main.find("a").text)
    lnks.append(main.find("a").get('href'))
Using Selenium webdriver
from selenium import webdriver

driver = webdriver.Chrome(executable_path=r"chromedriver_win32\chromedriver.exe")
driver.get("https://mommypoppins.com/events/115/los-angeles/all/tag/all/age/all/all/deals/0/near/0/0")

lnks = []
name = []
for a in driver.find_elements_by_class_name('ng-star-inserted'):
    link = a.get_attribute('href')
    lnks.append(link)
    nm = driver.find_element_by_css_selector("#list-item-0 > div > h2 > a").text
    name.append(nm)
I have tried both of the above methods.
Example:
name = ['Friday Night Flicks Drive-In at the Roadium', 'Open: Butterfly Pavilion and Nature Gardens']
lnks = ['https://mommypoppins.com/los-angeles-kids/event/in-person/friday-night-flicks-drive-in-at-the-roadium','https://mommypoppins.com/los-angeles-kids/event/in-person/open-butterfly-pavilion-and-nature-gardens']
Here's a solution with webdriver:
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
driver.get('https://mommypoppins.com/events/115/los-angeles/all/tag/all/age/all/all/deals/0/near/0/0')
time.sleep(3)
elements = driver.find_elements(By.XPATH, "//a[#angularticsaction='expanded-detail']")
attributes = [{el.text: el.get_attribute('href')} for el in elements]
print(attributes)
print(len(attributes))
driver.quit()
Here's a solution with webdriver and bs4:
import time
from selenium import webdriver
from bs4 import BeautifulSoup
driver = webdriver.Chrome()
driver.get('https://mommypoppins.com/events/115/los-angeles/all/tag/all/age/all/all/deals/0/near/0/0')
time.sleep(3)
soup = BeautifulSoup(driver.page_source, 'html.parser')
mains = soup.find_all("a", {"angularticsaction": "expanded-detail"})
attributes = [{el.text: el.get('href')} for el in mains]
print(attributes)
print(len(attributes))
driver.quit()
Here's a solution with requests:
import requests
url = "https://mommypoppins.com"
response = requests.get(f"{url}/contentasjson/custom_data/events_ng-block_1x/0/115/all/all/all/all/all").json()
attributes = [{r.get('node_title'): f"{url}{r['node'][r['nid']]['node_url']}"} for r in response['results']]
print(attributes)
print(len(attributes))
cheers!
The website is loaded dynamically, so plain requests against the page won't see the rendered content. However, the data is available in JSON format via a GET request to:
https://mommypoppins.com/contentasjson/custom_data/events_ng-block_1x/0/115/all/all/all/all/all
There's no need for BeautifulSoup or Selenium here; requests alone will work, which also makes your code much faster.
import requests

URL = "https://mommypoppins.com/contentasjson/custom_data/events_ng-block_1x/0/115/all/all/all/all/all"
BASE_URL = "https://mommypoppins.com"

response = requests.get(URL).json()

names = []
links = []
for json_data in response["results"]:
    data = json_data["node"][json_data["nid"]]
    names.append(data["title"])
    links.append(BASE_URL + data["node_url"])
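To check the result, you can pair each name with its link, for example:
for name, link in zip(names, links):
    print(name, link)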

How to scrape links from faceit

I am trying to scrape a demo link from a faceit room; this is what I've tried, but it doesn't work.
Any help is much appreciated!
import requests
from bs4 import BeautifulSoup
r = requests.get('https://www.faceit.com/en/csgo/room/1-8d6729b5-cfeb-4059-8894-3b07e04e76b2')
soup = BeautifulSoup(r.content, 'html.parser')
extracted_link = soup.find_all('href', class_='list-unstyled')
print(extracted_link)
Example Link : https://www.faceit.com/en/csgo/room/1-8d6729b5-cfeb-4059-8894-3b07e04e76b2
Example Link Extracted : https://demos-europe-west2.faceit-cdn.net/csgo/f9eadb47-aea5-4672-9499-4f457c7d28bd.dem.gz
Example : https://paste.pics/AQBQY
All of the content of the page is loaded dynamically, which means that requests + BeautifulSoup won't see it. So you might actually be better off using Selenium with a webdriver in headless mode.
For example:
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
options = Options()
options.headless = True
driver = webdriver.Chrome(options=options)
url = "https://www.faceit.com/en/csgo/room/1-8d6729b5-cfeb-4059-8894-3b07e04e76b2"
driver.get(url)
time.sleep(2)
element = driver.find_element_by_css_selector('.match-vs .btn-default')
print(element.get_attribute("href"))
Output:
https://demos-europe-west2.faceit-cdn.net/csgo/f9eadb47-aea5-4672-9499-4f457c7d28bd.dem.gz
You can also do it using only requests:
import requests as r
room_id = '1-8d6729b5-cfeb-4059-8894-3b07e04e76b2'
link = 'https://api.faceit.com/match/v2/match/'+room_id
res = r.get(link)
data = res.json()
extracted_links = data['payload']['demoURLs']
print(extracted_links)
The code probes their API to get all the data at once as JSON, then just extracts the needed information.
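If a room might not expose demo URLs yet, a defensive lookup on the same JSON avoids a KeyError (a small sketch, not part of the original answer):
# fall back to an empty list when the demo links are not present yet
extracted_links = data.get('payload', {}).get('demoURLs', [])
if not extracted_links:
    print('No demo links available for this room yet')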

Reading multiple URLs from a text file, processing each web page, and scraping the content inside

I have a .txt file with a list of URLs. My goal is to open this .txt file, access each URL line by line, scrape the content inside each URL, and append it, together with the URL, to the "draft.csv" file.
When I tried other code, the requests result showed "Please turn on JavaScript and refresh the page", so I switched to Selenium to get around this. I am able to fetch all the pages as wanted, but I can't get the desired content from each link.
Below is a list of example URLs:
http://example.com/2267/15175/index.html
http://example.com/2267/16796/index.html
http://example.com/2267/17895/index.html
This is my current code using Selenium and Requests.
from lxml import etree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sys
import pandas as pd
import urllib.request
import requests

frame = []
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
driver = webdriver.Chrome(options=chrome_options)

with open("draft.txt", "r") as file:
    for line in file:
        url = line.rstrip("\n")
        print(url)
        driver.get(url)
        html = etree.HTML(driver.page_source)
        allurl = requests.get(url)
        htmltext = allurl.text
        extract_link = html.xpath('//span[@id="my_two"]/table/tbody/tr/td/table[2]')
        for i in extract_link:
            link = i.xpath('./tbody/tr/td/div/p/a/@href')
            content = 'http://example.com' + link[0]
            frame.append({
                'content': content,
            })

dfs = pd.DataFrame(frame)
dfs.to_csv('draft.csv', index=False, encoding='utf-8-sig')
Thank you in advance for helping me with this!
Call driver.get() inside a for loop over the URLs, and you can use bs4 for the parsing:
from selenium import webdriver
from bs4 import BeautifulSoup

driver = webdriver.Chrome()

f = open("urls.txt")
urls = [url.strip() for url in f.readlines()]
f.close()

for url in urls:
    driver.get(url)
    ...
    html = driver.page_source
    soup = BeautifulSoup(html, 'html.parser')
    # grab whatever you need from each page, e.g. its title and the url itself
    information = soup.find('title')
    ...

driver.quit()
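Put together for the question's goal of writing everything to draft.csv, a minimal end-to-end sketch could look like this (the url and title columns are just illustrative; swap in whatever content you actually need to extract):
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver

driver = webdriver.Chrome()
rows = []

with open("draft.txt") as f:
    urls = [line.strip() for line in f if line.strip()]

for url in urls:
    driver.get(url)
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    title = soup.find('title')
    # one row per URL; extend the dict with any other fields you scrape
    rows.append({'url': url, 'title': title.text if title else ''})

driver.quit()
pd.DataFrame(rows).to_csv('draft.csv', index=False, encoding='utf-8-sig')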
