Get access to data in dynamic table using Selenium - python-3.x

The code below works and fills out the two forms required to get to the table on this page: https://forsikringsguiden.dk/#!/bilforsikring/resultatside
Once the forms are filled out, the table shows an overview of different insurance firms and how much you will pay yearly for car insurance, i.e. a comparison service. I need to scrape this overview once a week.
I am unsure how to do this. I know how to use BS4 for scraping, but I also need the firm names, not only the figures, and these are not available by inspecting the website in Chrome. If I dive into
the Network tab and look at the XHR requests, I find this link:
https://forsikringsguiden.dk/signalr/poll?transport=longPolling&messageId=d-D7589F50-A%2C0%7C9%2C0%7C_%2C1%7C%3A%2C0&clientProtocol=1.4&connectionToken=fUYa3MT52oKf77Y6yU1sLnXiVzPw2CD4XgA8x50EfifJlz8XTPjBeP0klHUKt2uXmnisqO0KLk3fCb5bjOZ8k%2FeJl8zaXAgtRIALW9rzMF%2F8L7Pk3MOYwPRY4md1sDk5&connectionData=%5B%7B%22name%22%3A%22insuranceofferrequesthub%22%7D%5D&tid=9&_=1572505813840
This shows me all the data I need, but I cannot navigate to this URL in Selenium.
How do I tackle this problem?
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
chrome_options = Options()
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--disable-gpu")
# enable browser logging
d = DesiredCapabilities.CHROME
d['loggingPrefs'] = { 'browser':'ALL' }
driver = webdriver.Chrome(desired_capabilities = d, options=chrome_options)
driver.fullscreen_window()
wait = WebDriverWait(driver,1)
driver.get("https://forsikringsguiden.dk/#!/bilforsikring/manuel")
#time.sleep(5)
#remove cookie bar
driver.find_element_by_id('cookieBarAccept').click()
maerke = driver.find_element_by_xpath('//*[@id="s2id_carSelectedMake"]/a').click()
driver.find_element_by_xpath('//*[@id="s2id_autogen1_search"]').send_keys("Hyundai")
driver.minimize_window()
driver.maximize_window()
driver.find_element_by_xpath('//*[@id="select2-drop"]').click()
model = driver.find_element_by_xpath('//*[@id="s2id_autogen2"]').click()
driver.find_element_by_xpath('//*[@id="s2id_autogen3_search"]').send_keys("i30")
driver.minimize_window()
driver.maximize_window()
time.sleep(1)
driver.find_element_by_xpath('//*[@id="select2-drop"]').click()
driver.execute_script("scrollBy(0,250)")
aargang = driver.find_element_by_xpath('//*[@id="s2id_autogen4"]/a').click()
time.sleep(1)
driver.find_element_by_xpath('//*[@id="s2id_autogen5_search"]').send_keys("2009")
driver.minimize_window()
driver.maximize_window()
time.sleep(1)
driver.find_element_by_xpath('//*[@id="select2-drop"]').click()
driver.execute_script("scrollBy(0,250)")
motor_str = driver.find_element_by_xpath('//*[@id="s2id_autogen6"]/a').click()
time.sleep(1)
driver.find_element_by_xpath('//*[@id="s2id_autogen7_search"]').send_keys("1,6")
driver.minimize_window()
driver.maximize_window()
time.sleep(1)
driver.find_element_by_xpath('//*[@id="select2-drop"]').click()
variant = driver.find_element_by_xpath('//*[@id="s2id_autogen8"]').click()
time.sleep(1)
driver.find_element_by_xpath('//*[@id="s2id_autogen9_search"]').send_keys("1,6 CRDi 116HK 5d")
driver.minimize_window()
driver.maximize_window()
time.sleep(1)
driver.find_element_by_xpath('//*[@id="select2-drop"]').click()
driver.execute_script("scrollBy(0,250)")
godkend_oplysninger = driver.find_element_by_xpath('//*[@id="content"]/div[4]/form/div[6]/div/button').click()
#Om dig siden
driver.get("https://forsikringsguiden.dk/#!/bilforsikring/omdig")
alder = wait.until(EC.presence_of_element_located((By.XPATH,'//*[@id="content"]/div/div[2]/div[2]/form/div[1]/div[1]/div/input')))
alder.send_keys("50")
adresse = wait.until(EC.presence_of_element_located((By.XPATH,'//*[@id="adresse-autocomplete"]')))
adresse.send_keys("Havevang 8, 3. th, 4300 Holbæk", Keys.ENTER)
aar = wait.until(EC.presence_of_element_located((By.XPATH,'//*[@id="content"]/div/div[2]/div[2]/form/div[2]/div/div/input')))
aar.send_keys("10")
driver.execute_script("scrollBy(0,250)")
#Antal skader
driver.find_element_by_xpath('/html/body/div[6]/div/div[2]/div/div[2]/div[2]/form/div[3]/div/div/div[2]').click()
wait
driver.find_element_by_xpath('/html/body/div[11]/ul/li[3]').click()
driver.minimize_window()
driver.maximize_window()
time.sleep(1)
driver.find_element_by_xpath('//*[@id="select2-drop"]').click()
time.sleep(1)
#skade 1
driver.find_element_by_xpath('/html/body/div[6]/div/div[2]/div/div[2]/div[2]/form/div[4]/div/div[1]/div/div[2]').click()
wait
driver.find_element_by_xpath('/html/body/div[12]/ul/li[5]').click()
driver.minimize_window()
driver.maximize_window()
time.sleep(1)
driver.find_element_by_xpath('//*[@id="select2-drop"]').click()
time.sleep(1)
#skade 2
driver.find_element_by_xpath('/html/body/div[6]/div/div[2]/div/div[2]/div[2]/form/div[4]/div/div[2]/div/div[2]').click()
wait
driver.find_element_by_xpath('/html/body/div[13]/ul/li[3]').click()
driver.minimize_window()
driver.maximize_window()
time.sleep(1)
driver.find_element_by_xpath('//*[@id="select2-drop"]').click()
time.sleep(1)
find_daekning = driver.find_element_by_xpath('//*[@id="content"]/div/div[2]/div[2]/form/div[5]/div/button').click()
EDIT:
I added this to my code using Selenium:
##### Get Data #####
driver.get("https://forsikringsguiden.dk/#!/bilforsikring/resultatside")
wait = WebDriverWait(driver,10)
wait
res_element = driver.find_elements_by_xpath('/html/body/div[7]/div/div[2]/div[1]/div[2]/div[2]')
res = [x.text for x in res_element]
print(res, "\n")
But it doesn't get me the numbers, just some of the text.
Here is the result:
['Sortér efter: Forklaring\nGå til selskab\nDin dækning\nkr./år -\nMed
samlerabat kr./år\nSelvrisiko\nSe detaljer\nSammenlign\nGå til selskab\nDin dækning\nkr./år -\nMed samlerabat kr./år\nSelvrisiko
\nSe detaljer\nSammenlign\nGå til selskab\nDin dækning\nkr./år -
\nMed samlerabat kr./år\nSelvrisiko\nSe detaljer\nSammenlign\n
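One way to tackle this (a sketch, not taken from the original post) is to skip the rendered table entirely and capture the SignalR XHR response that already contains the data, for example with the third-party selenium-wire package (pip install selenium-wire). The exact structure of the insuranceofferrequesthub payload is an assumption here and needs to be inspected manually:
from seleniumwire import webdriver  # drop-in replacement for selenium's webdriver
import json
# Re-use the same chrome_options and form-filling steps as in the script above;
# only the webdriver import/construction changes.
driver = webdriver.Chrome(options=chrome_options)
# ... fill out the two forms exactly as before ...
# selenium-wire records every request the browser made; look for the SignalR
# poll that carries the insurance offers and read its response body.
for request in driver.requests:
    if request.response and 'signalr' in request.url:
        # depending on the Content-Encoding header the body may need decompressing first
        body = request.response.body.decode('utf-8', errors='replace')
        try:
            data = json.loads(body)  # payload structure must be inspected manually
            print(data)
        except json.JSONDecodeError:
            continue
Alternatively, since the prices are filled in asynchronously by that long-polling call, waiting until the "kr./år" cells actually contain digits before reading .text should also pick up the missing numbers.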

Related

Selenium fails to scroll down

I am using Selenium to scrape data from here. The website uses an animation to show the sections after you scroll down. I am trying to scroll down to the footer and wait for the animation so I can get the data from the page.
I am not sure that is the only approach that will get me the data, because I can see that the animation only adds the class aos-animate to the main class, and if that class is not on the HTML element, it won't get the text!
In the get_service_data function, I am trying to scroll down to the end of the page. I tried to scroll down before I start the loop.
I tried:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
html = driver.find_element(By.CLASS_NAME, 'html')
html.send_keys(Keys.END)
html.send_keys(Keys.PAGE_DOWN)
copyright = driver.find_element(By.CLASS_NAME, 'copyright')
driver.execute_script("arguments[0].scrollIntoView();", copyright)
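For reference, one way to wait explicitly for that aos-animate class is an explicit wait on the class attribute. This is only a sketch; the .serviceSubsetRow locator below is an assumption, not part of the original script:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
# Sketch: after scrolling, wait until the animated section has picked up the
# "aos-animate" class that the page adds when the element comes into view.
section = driver.find_element(By.CSS_SELECTOR, ".serviceSubsetRow")  # locator is an assumption
WebDriverWait(driver, 10).until(
    lambda d: "aos-animate" in (section.get_attribute("class") or "")
)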
Here is my full script:
import os
import time
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
language = "en" # to take this from the user
main_link = f"https://www.atlp.ae/{language}"
driver_path = os.path.join(os.getcwd(), "chromedriver")
# options = webdriver.ChromeOptions()
# options.headless = True
driver = webdriver.Chrome(driver_path) # options=options
driver.maximize_window()
def get_services_links():
links = []
driver.get(main_link)
services_header_xpath = '//*[@id="fixed-header"]/div/div[2]/div/nav/ul/li[5]/button'
driver.find_element(By.XPATH, services_header_xpath).click()
services_menu_xpath = '//*[@id="serviceInfotitle"]/nav/ul'
services_menu = driver.find_element(By.XPATH, services_menu_xpath)
options = services_menu.find_elements(By.TAG_NAME ,"li")
for option in options:
a_tag = option.find_element(By.TAG_NAME ,"a")
links.append(a_tag.get_attribute("href"))
return links[:-1] if len(links) > 0 else []
def get_service_data(link):
driver.get(link)
wait = WebDriverWait(driver, 10)
service_name_xpath = '//*[@id="main-scrollbar"]/div[1]/main/sc-placeholder/app-intro-section/section/div/div[1]/div[1]/div/p'
wait.until(EC.visibility_of_element_located((By.XPATH,service_name_xpath)))
service_name = driver.find_element(By.XPATH, service_name_xpath).text
print("Service Name: ", service_name)
# row serviceSubsetRow ng-star-inserted
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, 'ServiceSubsetWrapper')))
services_wrapper = driver.find_element(By.CLASS_NAME, 'ServiceSubsetWrapper')
container = services_wrapper.find_element(By.CLASS_NAME, 'container')
service_sections = container.find_elements(By.CLASS_NAME, 'serviceSubsetRow')
for service in service_sections:
textual_div = service.find_element(By.CLASS_NAME, 'textCol')
something = textual_div.find_element(By.CLASS_NAME, 'serviceSubsetTitle')
print("Text: ", something.text)
if __name__ == '__main__':
# try:
links = get_services_links()
for link in links:
get_service_data(link)
break
driver.quit()
What you need is this:
something.get_attribute('innerText'), because, perhaps due to the added animation, the regular text is not working.
Also, I have removed a few lines as I thought they were not needed (at least for this exercise), and I have directly added a loop to make it work with serviceSubsetTitle:
def get_service_data(link):
driver.get(link)
wait = WebDriverWait(driver, 10)
service_name_xpath = '//*[@id="main-scrollbar"]/div[1]/main/sc-placeholder/app-intro-section/section/div/div[1]/div[1]/div/p'
wait.until(EC.visibility_of_element_located((By.XPATH, service_name_xpath)))
service_name = driver.find_element(By.XPATH, service_name_xpath).text
print("Service Name: ", service_name)
# ---- removed these lines --------
# row serviceSubsetRow ng-star-inserted
# wait.until(EC.visibility_of_element_located((By.CLASS_NAME, 'ServiceSubsetWrapper')))
# services_wrapper = driver.find_element(By.CLASS_NAME, 'ServiceSubsetWrapper')
#
# container = services_wrapper.find_element(By.CLASS_NAME, 'container')
# service_sections = container.find_elements(By.CLASS_NAME, 'serviceSubsetRow')
# ----- End of lines removal ----------
# Clicking out the cookie acceptance button
try:
driver.find_element(By.XPATH, "//*[@class='cc-btn cc-allow']").click()
except:
print("nothing there")
# --- removed these lines
# for service in service_sections:
# textual_div = service.find_element(By.CLASS_NAME, 'textCol')
# time.sleep(3)
# --- end of lines removal ---------
# These are my lines here from below:
somethings = driver.find_elements(By.XPATH, "//*[contains(@class, 'serviceSubsetTitle')]")
print(len(somethings))
for something in somethings:
# time.sleep(2)
title_txt = something.get_attribute('innerText')
print(title_txt)
here is the output:
Service Name: Sea Services
5
Vessel Management and Marine Services
Passenger Handling and Cargo Operations
Issuance of Certificates and Approvals in Ports
Ports Licensing
Property Leasing Services - Ports
Process finished with exit code 0
This is one way of scrolling that page down:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
chrome_options = Options()
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument('disable-notifications')
chrome_options.add_argument("window-size=1280,720")
webdriver_service = Service("chromedriver/chromedriver") ## path to where you saved chromedriver binary
browser = webdriver.Chrome(service=webdriver_service, options=chrome_options)
url = 'https://www.atlp.ae/en'
browser.get(url)
browser.execute_script('window.scrollBy(0, 100);')
cookie_b = WebDriverWait(browser, 20).until(EC.element_to_be_clickable((By.XPATH, "//a[@aria-label='deny cookies']")))
cookie_b.click()
body = WebDriverWait(browser, 20).until(EC.presence_of_element_located((By.ID, "main-scrollbar")))
body.click()
body.send_keys(Keys.END)
print('scrolled down')
The setup is chrome/chromedriver on Linux; however, it can be adapted to your system, just observe the imports and the code after defining the browser/driver. Selenium docs: https://www.selenium.dev/documentation/

Python Selenium for getting the whole content of table in Reactjs

I tried to scrape the content of a table from wyscout.com, which seems to be built with React.
After logging in, the script selects the country (e.g. England), the league (e.g. Premier League) and the team (e.g. Arsenal), and then chooses the Stats tab.
It then shows the table whose data I want to scrape. Even though there is a button to export an Excel file, I want to scrape the content manually using Selenium or BeautifulSoup.
However, the script gets only 18 rows even though the table has more than 100 rows.
Please let me know the solution.
Thanks.
Here is my code.
from re import search
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import string
from selenium.webdriver.common.action_chains import ActionChains
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
# Load Chrome Browser
show_browser = True
options = Options()
# options.add_argument('--headless')
scraped_data = []
def bot_driver(url, user_name, user_password):
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
driver.get(url)
driver.maximize_window()
time.sleep(2)
# Log in
# login = driver.find_element_by_xpath("//ul[@id='avia-menu']/li[5]/a")
# login.click()
time.sleep(10)
idd = driver.find_element_by_xpath("//input[@id='login_username']")
idd.send_keys(user_name)
passW = driver.find_element_by_xpath("//input[@id='login_password']")
passW.send_keys(user_password)
time.sleep(2)
submit = driver.find_element_by_xpath("//button[@id='login_button']")
submit.click()
time.sleep(10)
try:
force_login = driver.find_element_by_xpath("//button[@class='btn2_zFM sc-jDwBTQ cUKaFo -block3u2Qh -primary1dLZk']")
force_login.click()
print('force loging')
except :
print('force login error')
return driver
def select_country(driver, country_name):
# Specific Country
# country = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//div[@commandsource='list#area_list#30']")))
# country = driver.find_element_by_xpath("//div[@commandsource='list#area_list#30']")
# All the countries
list_country = WebDriverWait(driver, 30).until(EC.presence_of_all_elements_located((By.XPATH, "//div[@id='detail_0_home_navy']/div[1]/div/div")))
time.sleep(3)
for entry in list_country:
if country_name == entry.text:
print('country click here')
entry.click()
return driver, 1
return driver, 0
def select_league(driver, league_name):
# Specific League
# league = driver.find_element_by_xpath("//div[@commandsource='list#competition_list#0']")
# All the leagues
list_league = driver.find_elements_by_xpath("//div[@id='detail_0_area_navy_0']/div[1]/div/div")
for entry in list_league:
if league_name == entry.text:
entry.click()
return driver, 1
return driver, 0
def select_team(driver, team_names):
# Specific Team
# team = driver.find_element_by_xpath("//div[@commandsource='list#team_list#0']")
flag_team = 0
list_team = driver.find_elements_by_xpath("//div[@id='detail_0_competition_navy_0']/div[1]/div/div")
for entry in list_team:
if entry.text in team_names:
flag_team = 1
print('selected team = ', entry.text)
entry.click()
time.sleep(2)
# Stats
stats = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, 'Stats')))
stats.click()
time.sleep(3)
WebDriverWait(driver,10).until(EC.visibility_of_element_located((By.XPATH, "//div[@id='detail_0_team_stats']/div/div/div/main/div[3]/div[2]/div/table")))
content_stats = driver.page_source
soup_stats = BeautifulSoup(content_stats, "html.parser")
table_stats = soup_stats.find('table', attrs={'class': 'teamstats__Index-module__table___1K93L teamstats__Index-module__with-opp___16Rp5'})
# print(table_stats)
tbody_stats = table_stats.find('tbody')
tr_stats = tbody_stats.find_all('tr')
print('number of tr = ', len(tr_stats))
# Return to team selection
back_team = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//div[@id='detail_0_team_back']")))
back_team.click()
time.sleep(5)
if flag_team == 1:
return driver, 1
else:
return driver, 0
if __name__ == "__main__":
# User input
# Login - wyscout_url = 'https://wyscout.com/'
wyscout_url = 'https://platform.wyscout.com/app/?/'
wyscout_user_name = '' # username
wyscout_user_password = '' # password
wyscout_driver = bot_driver(wyscout_url, wyscout_user_name, wyscout_user_password)
time.sleep(10)
# Select a Country
country = 'England' # .upper()
wyscout_driver, succeed = select_country(wyscout_driver, country)
if succeed == 0:
print('NO country!')
time.sleep(7)
# Select a league
league = 'Premier League' # .upper()
wyscout_driver, succeed = select_league(wyscout_driver, league)
if succeed == 0:
print('NO League!')
time.sleep(7)
# Select team
team_options = ['Arsenal']
wyscout_driver, succeed = select_team(wyscout_driver, team_options)
time.sleep(7)
if succeed == 0:
print('NO Team!')
time.sleep(7)
print('!!!Wyscout END!!!')
# wyscout_driver.quit()
Finally I figured it out by myself.
Here is my solution.
# Scroll down
print('scroll down')
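# Note: table_stats here must be the table located as a Selenium WebElement
# (e.g. via driver.find_element_by_xpath(...)), not the BeautifulSoup tag used
# earlier in the question, because execute_script() only accepts WebElements as arguments.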
last_height = driver.execute_script("return arguments[0].scrollHeight;", table_stats)
time.sleep(3)
while True:
driver.execute_script("arguments[0].scrollBy(0,arguments[0].scrollHeight)", table_stats)
time.sleep(5)
new_height = driver.execute_script("return arguments[0].scrollHeight;", table_stats)
if new_height == last_height:
break
last_height = new_height
print('scroll end')
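If the React table recycles rows out of the DOM while you scroll, a variation of the same loop that collects the visible rows on every step can be used. This is only a sketch; it assumes driver is the logged-in webdriver and table_el is the stats table located as a Selenium WebElement:
from bs4 import BeautifulSoup
import time

collected = {}
last_height = driver.execute_script("return arguments[0].scrollHeight;", table_el)
while True:
    # parse whatever rows are currently rendered and de-duplicate them by content
    soup = BeautifulSoup(driver.page_source, "html.parser")
    for tr in soup.select("table tbody tr"):  # narrow the selector if other tables are present
        cells = tuple(td.get_text(strip=True) for td in tr.find_all("td"))
        if cells:
            collected[cells] = True
    driver.execute_script("arguments[0].scrollBy(0, arguments[0].scrollHeight)", table_el)
    time.sleep(3)
    new_height = driver.execute_script("return arguments[0].scrollHeight;", table_el)
    if new_height == last_height:
        break
    last_height = new_height
print('rows collected =', len(collected))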

How to solve Walmart Robot or Human challenge?

I have written code that creates an account on https://www.walmart.com/ using Selenium with Python. The code opens the Walmart website, goes to the Create an Account tab, fills in the required details and clicks the Create Account button. However, the problem is Walmart's human verification challenge, which appears randomly at any stage, for example just after opening the URL or after clicking the Create Account button.
I found code on Stack Overflow to bypass this challenge (shown below), but it didn't work for me.
element = driver.find_element(By.CSS_SELECTOR, '#px-captcha')
action = ActionChains(driver)
action.click_and_hold(element)
action.perform()
time.sleep(100)
action.release(element)
action.perform()
time.sleep(0.2)
action.release(element)
For reference my python code is as follows:
import time
import requests
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
url = "https://www.walmart.com/"
first_name = "chuza"
last_name = "789"
email_id = "chuza789@gmail.com"
password = "Eureka1#"
options = Options()
s=Service('C:/Users/Samiullah/.wdm/drivers/chromedriver/win32/96.0.4664.45/chromedriver.exe')
driver = webdriver.Chrome(service=s, options=options)
driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
"source":
"const newProto = navigator.__proto__;"
"delete newProto.webdriver;"
"navigator.__proto__ = newProto;"
})
wait = WebDriverWait(driver, 20)
actions = ActionChains(driver)
driver.get(url)
sign_in_btn = wait.until(EC.visibility_of_element_located((By.XPATH, "//div[text()='Sign In']")))
actions.move_to_element(sign_in_btn).perform()
time.sleep(0.5)
wait.until(EC.visibility_of_element_located((By.XPATH, '//button[normalize-space()="Create an account"]'))).click()
f_name = driver.find_element(By.ID, 'first-name-su')
l_name = driver.find_element(By.ID, 'last-name-su')
email = driver.find_element(By.ID, 'email-su')
pswd = driver.find_element(By.ID, 'password-su')
f_name.send_keys(first_name)
driver.implicitly_wait(2)
l_name.send_keys(last_name)
driver.implicitly_wait(2.5)
email.send_keys(email_id)
driver.implicitly_wait(3)
pswd.send_keys(password)
driver.implicitly_wait(2.8)
###
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "button[data-automation-id='signup-submit-btn']"))).click()
Can anyone please guide me on how to solve Walmart's "Robot or Human?" challenge and how to integrate the solution into my existing code? Thanks in advance.
The simplest way to overcome this is to check for the presence of this element.
In case this element appears, reload the page until it is no longer present.
Something like this:
human_dialog = driver.find_elements(By.XPATH, "//div[@aria-labelledby='ld_modalTitle_0']")
while human_dialog:
driver.refresh()
time.sleep(1)
human_dialog = driver.find_elements(By.XPATH, "//div[@aria-labelledby='ld_modalTitle_0']")

How to open and access multiple (nearly 50) tabs in Chrome using ChromeDriver and Selenium through Python

I'm trying to gather some information from certain webpages using Selenium and Python. I have working code for a single tab, but now I have a situation where I need to open 50 tabs in Chrome at once and process each page's data.
1) Open 50 tabs at once - I already have the code for this.
2) Switch control between the tabs, process the information on each page, close the tab and move on to the next tab, doing the same.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import psycopg2
import os
import datetime
final_results=[]
positions=[]
saerched_url=[]
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
#options.add_argument('--headless')
options.add_argument("—-incognito")
browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=options)
browser.implicitly_wait(20)
#def db_connect():
try:
DSN = "dbname='postgres' user='postgres' host='localhost' password='postgres' port='5432'"
TABLE_NAME = 'staging.search_url'
conn = psycopg2.connect(DSN)
print("Database connected...")
cur = conn.cursor()
cur.execute("SET datestyle='German'")
except (Exception, psycopg2.Error) as error:
print('database connection failed')
quit()
def get_products(url):
browser.get(url)
names = browser.find_elements_by_xpath("//span[@class='pymv4e']")
upd_product_name_list=list(filter(None, names))
product_name = [x.text for x in upd_product_name_list]
product = [x for x in product_name if len(x.strip()) > 2]
upd_product_name_list.clear()
product_name.clear()
return product
links = ['https://www.google.com/search?q=Vitamin+D',
'https://www.google.com/search?q=Vitamin+D3',
'https://www.google.com/search?q=Vitamin+D+K2',
'https://www.google.com/search?q=D3',
'https://www.google.com/search?q=Vitamin+D+1000']
for link in links:
# optional: we can wait for the new tab to open by comparing window handles count before & after
tabs_count_before = len(browser.window_handles)
# open a link
control_string = "window.open('{0}')".format(link)
browser.execute_script(control_string)
# optional: wait for windows count to increment to ensure new tab is opened
WebDriverWait(browser, 1).until(lambda browser: tabs_count_before != len(browser.window_handles))
# get list of currently opened tabs
tabs_list = browser.window_handles
print(tabs_list)
# switch control to newly opened tab (the last one in the list)
last_tab_opened = tabs_list[len(tabs_list)-1]
browser.switch_to_window(last_tab_opened)
# now you can process data on the newly opened tab
print(browser.title)
for lists in tabs_list:
last_tab_opened = tabs_list[len(tabs_list)-1]
browser.switch_to_window(last_tab_opened)
filtered=[]
filtered.clear()
filtered = get_products(link)
saerched_url.clear()
if not filtered:
new_url=link+'+kaufen'
get_products(link)
print('Modified URL :'+link)
if filtered:
print(filtered)
positions.clear()
for x in range(1, len(filtered)+1):
positions.append(str(x))
saerched_url.append(link)
gobal_position=0
gobal_position=len(positions)
print('global postion first: '+str(gobal_position))
print("\n")
company_name_list = browser.find_elements_by_xpath("//div[@class='LbUacb']")
company = []
company.clear()
company = [x.text for x in company_name_list]
print('Company Name:')
print(company, '\n')
price_list = browser.find_elements_by_xpath("//div[@class='e10twf T4OwTb']")
price = []
price.clear()
price = [x.text for x in price_list]
print('Price:')
print(price)
print("\n")
urls=[]
urls.clear()
find_href = browser.find_elements_by_xpath("//a[@class='plantl pla-unit-single-clickable-target clickable-card']")
for my_href in find_href:
url_list=my_href.get_attribute("href")
urls.append(url_list)
print('Final Result: ')
result = zip(positions,filtered, urls, company,price,saerched_url)
final_results.clear()
final_results.append(tuple(result))
print(final_results)
print("\n")
print('global postion end :'+str(gobal_position))
i=0
try:
for d in final_results:
while i <= gobal_position:
print( d[i])
cur.execute("""INSERT into staging.pla_crawler_results(position, product_name, url,company,price,searched_url) VALUES (%s, %s, %s,%s, %s,%s)""", d[i])
print('Inserted succesfully')
conn.commit()
i=i+1
except (Exception, psycopg2.Error) as error:
print (error)
pass
browser.close()
Ideally you shouldn't attempt to open 50 tabs at once as:
Handling 50 concurrent tabs through Selenium will require complicated logic that is hard to maintain.
Additionally, you may run into CPU and memory usage issues as:
Chrome maintains many processes.
Whereas at times Firefox uses too much RAM.
Solution
If you have a list of the urls as follows:
['https://selenium.dev/downloads/', 'https://selenium.dev/documentation/en/']
You can iterate over the list to open them one by one for scraping using the following Locator Strategy:
Code Block:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.keys import Keys
links = ['https://selenium.dev/downloads/', 'https://selenium.dev/documentation/en/']
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
for link in links:
driver = webdriver.Chrome(options=options, executable_path=r'C:\Utility\BrowserDrivers\chromedriver.exe')
driver.get(link)
print(driver.title)
print("Perform webscraping here")
driver.quit()
print("End of program")
Console Output:
Downloads
Perform webscraping here
The Selenium Browser Automation Project :: Documentation for Selenium
Perform webscraping here
End of program
Reference
You can find a relevant detailed discussion in:
WebScraping JavaScript-Rendered Content using Selenium in Python

How to Detect Popup and Close it Using Selenium Python (Google Chrome)

Hey, I am scraping Shopify review shop URLs, but while I am navigating through the search results, a popup appears and I have no idea how to detect it and close it.
Here's my code
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
driver = webdriver.Chrome()
url='https://apps.shopify.com/sales-pop'
driver.get(url)
#Loop and Navigate Through the Search Results
page_number = 2
while True:
try:
link = driver.find_element_by_link_text(str(page_number))
except NoSuchElementException:
break
if page_number > 8:
timeout = 20
try:
WebDriverWait(driver,timeout).until(EC.visibility_of_element_located((By.XPATH,'//div[@title="close"]')))
except TimeoutException:
print("Timed out waiting for page to load")
driver.quit()
#Switch to the Popup
driver.switch_to_alert()
driver.find_element_by_xpath('//div[@title="close"]').click()
driver.implicitly_wait(5)
link.click()
print(driver.current_url)
page_number += 1
else:
driver.implicitly_wait(5)
link.click()
print(driver.current_url)
page_number += 1
#Scraping Rating
stars = driver.find_elements_by_xpath('//figure[@class="resourcesreviews-reviews-star"]')
starstars = []
for star in stars:
starstar=star.find_element_by_xpath('.//div/span')
starstars.append(starstar.get_attribute('class'))
#Scraping URL
urls = driver.find_elements_by_xpath('//figcaption[@class="clearfix"]')
titles=[]
for url in urls:
title=url.find_element_by_xpath('.//strong/a')
titles.append(title.get_attribute('href'))
#Print Titles and Rating Side by Side
for titless, starstarss in zip(titles, starstars):
print(titless + " " + starstarss)
You can just use WebDriverWait and window_handles. Specifically, you can probably replace your #Switch to the Popup section with something like:
WebDriverWait(driver, 5).until(lambda d: len(d.window_handles) == 2)
driver.switch_to.window(driver.window_handles[1])
driver.close()
driver.switch_to.window(driver.window_handles[0])
