I want to click on the check box, but my code is not working. The steps I want to follow are:
Open the website
Select a Profession, e.g. Dental
Select a License Type, e.g. Dentist
Enter a letter followed by * to get all the records
Click on the check box
Click on the search button
The script also asks for image verification, which I am unable to handle; any help would be appreciated.
home_page = 'https://forms.nh.gov/licenseverification/'
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
import csv
from shutil import copyfile
import datetime
import subprocess
import time
import multiprocessing
import sys
current_date = datetime.date.today()
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.chrome.options import Options
driver = webdriver.Chrome(r"C:\Users\psingh\AppData\Local\Programs\Python\Python38-32\chromedriver.exe")
driver.get(home_page)
select_prof = WebDriverWait(driver, 30).until(
    EC.presence_of_element_located((By.ID, "t_web_lookup__profession_name")))
# select profession criteria value
Select(select_prof).select_by_value('Dental')
select_lic_type = WebDriverWait(driver, 30).until(
    EC.presence_of_element_located((By.ID, "t_web_lookup__license_type_name")))
# select license type criteria value
Select(select_lic_type).select_by_value('Dentist')
time.sleep(1)
send = WebDriverWait(driver, 30).until(
    EC.presence_of_element_located((By.ID, "t_web_lookup__last_name"))
)
send.send_keys('A*')
time.sleep(1)
# click on check box (this XPath points at the page root div, not the checkbox)
driver.find_element_by_xpath("/html/body/div[1]").click()
# click search button
driver.find_element_by_id("sch_button").click()
# wait to get the result
time.sleep(1)
The check box you are trying to click is inside an iframe. First switch to that iframe, then try to click the checkbox.
The code below works fine in PyCharm on Windows 10.
from time import sleep  # needed for the sleep() calls below

driver = webdriver.Chrome(PATH)
url = 'https://forms.nh.gov/licenseverification/'
driver.get(url)
driver.maximize_window()
sleep(5)
select_prof = driver.find_element(By.ID, 't_web_lookup__profession_name')
Select(select_prof).select_by_value('Dental')
select_lic_type = driver.find_element(By.ID, "t_web_lookup__license_type_name")
# select license type criteria value
Select(select_lic_type).select_by_value('Dentist')
sleep(1)
send = driver.find_element(By.ID, "t_web_lookup__last_name")
send.send_keys('A*')
sleep(1)
# the reCAPTCHA checkbox lives inside the first iframe: switch in, click, switch back
driver.switch_to.frame(0)
captcha = driver.find_element(By.CSS_SELECTOR, '.recaptcha-checkbox-border')
captcha.click()
driver.switch_to.default_content()
# click search button
driver.find_element(By.ID, "sch_button").click()
sleep(1)
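If frame index 0 ever stops matching, you can switch by the iframe element instead of its index. A minimal sketch, assuming the widget's iframe carries the usual title="reCAPTCHA" attribute (verify against the live page):
frame = driver.find_element(By.CSS_SELECTOR, "iframe[title='reCAPTCHA']")  # title value is an assumption
driver.switch_to.frame(frame)
driver.find_element(By.CSS_SELECTOR, '.recaptcha-checkbox-border').click()
driver.switch_to.default_content()  # back to the main document before clicking Search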
import urllib3
import certifi
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
from bs4 import BeautifulSoup
import time
import ssl
http = urllib3.PoolManager(ca_certs=certifi.where())
chrome_options = Options()
chrome_options.add_argument("--incognito")
driver = webdriver.Chrome(options=chrome_options, executable_path="D:\\python works\\driver\\chromedriver.exe")
URL = "https://physicians.wustl.edu/"
driver.get(URL)
time.sleep(5)
driver.find_element_by_link_text("Find a Doctor").click()
find_doc = driver.current_url
print(find_doc)
driver.get(find_doc)
# content = driver.page_source
# print(content)
response = http.request('GET', find_doc)
url_text = response.data #text
time.sleep(10)
count = len(driver.find_elements_by_xpath("//span[@class='entry-title-link']"))
print(count)
s = driver.find_element_by_css_selector("span[class='entry-title-link']") #firstpage click
s.click()
urls = []
provider = []
print(driver.current_url)
urls.append(driver.current_url)
name = driver.find_element_by_css_selector("h1[class='washu-ppi-name entry-title']").text
print(name)
provider.append(name)
specialization = driver.find_element_by_css_selector("ul[class='wuphys-specialties']").text
print(specialization)
location= driver.find_element_by_css_selector("a[class='wuphys-addr name']").text
print(location)
time.sleep(5)
driver.find_element_by_css_selector("a[href='https://physicians.wustl.edu/find-a-doctor/']").click()
time.sleep(10)
The spans all have the same class name, and I need to loop over them even though each sits in a different div. The page lists doctors' names; clicking a name opens that doctor's details, and after scraping them I need to move on to the next doctor, whose link has the same class name.
I think you are looking for something of this kind (looping through all the doctor links and getting info from each). Here is a basic skeleton which you can extend to collect more data for each doctor.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import ActionChains
import time

driver = webdriver.Chrome(executable_path="D:\\python works\\driver\\chromedriver.exe")
driver.maximize_window()
driver.get("https://physicians.wustl.edu/")
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.LINK_TEXT, "Find a Doctor"))).click()
print(driver.current_url)
doc_cnt = WebDriverWait(driver, 10).until(EC.visibility_of_all_elements_located((By.XPATH, "//span[@class='entry-title-link']")))
print(len(doc_cnt))
doc_list=[] # to append all the doctor urls into the list for further processing, if required.
for doc in doc_cnt:
    # Ctrl+click opens the doctor's page in a new tab
    ActionChains(driver).key_down(Keys.CONTROL).click(doc).key_up(Keys.CONTROL).perform()
    driver.switch_to.window(driver.window_handles[1])
    doc_list.append(driver.current_url)
    # ... you could include any code of yours related to each doctor here ...
    # after this the tab closes and the next doctor link opens
    driver.close()
    driver.switch_to.window(driver.window_handles[0])
    time.sleep(1)
print(doc_list)
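A hedged alternative if the tab juggling proves flaky: collect the hrefs first, then visit each URL directly with driver.get(). This sketch assumes each 'entry-title-link' span sits inside an <a> ancestor carrying the href; adjust the XPath if the markup differs:
links = driver.find_elements(By.XPATH, "//span[@class='entry-title-link']/ancestor::a")  # <a> ancestor is an assumption
doc_list = [a.get_attribute("href") for a in links]
for url in doc_list:
    driver.get(url)
    # ... scrape the per-doctor details here, then loop on ...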
I'm trying to scrape moneycontrol.com. When I try to send a value to the search box, I keep getting the same error in the except block: "Element not found".
I tried using the XPath id as well as the full XPath, but it doesn't work in either case.
This happens WITHOUT MAXIMIZING THE WINDOW.
XPath id - //*[@id="search_str"]
Full XPath - /html/body/div[1]/header/div[1]/div[1]/div/div/div[2]/div/div/form/input[5]
Attaching the full code below:
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys

def search_stock():
    driver = webdriver.Chrome(r'./chromedriver')
    driver.get('https://www.moneycontrol.com/')
    time.sleep(5)
    search_icon = driver.find_element_by_xpath('//*[@id="fixedheader"]/div[4]/span')
    search_icon.click()
    time.sleep(2)
    try:
        search_box = driver.find_element_by_xpath('//*[@id="search_str"]')
        print("Element is visible? " + str(search_box.is_displayed()))
        time.sleep(10)
        if search_box.is_displayed():
            search_box.send_keys('Zomato')
            search_box.send_keys(Keys.RETURN)
    except NoSuchElementException:
        print("Element not found")
    driver.close()

search_stock()
Sometimes it starts working, but most of the time it throws exceptions and errors. I've been struggling for 3 days, but none of the solutions work.
Web scraping like that seems quite inefficient; it is probably better to use requests and bs4. However, if you want to do it this way, you could try using action chains. Or you can do driver.get('https://www.moneycontrol.com/india/stockpricequote/consumer-food/zomato/Z') from the start instead of typing it in.
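For the requests + bs4 route, a minimal sketch using the quote URL above (the User-Agent header is an assumption to get past basic bot filtering, and the live page structure may require different parsing):
import requests
from bs4 import BeautifulSoup

url = 'https://www.moneycontrol.com/india/stockpricequote/consumer-food/zomato/Z'
resp = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})  # header value is an assumption
soup = BeautifulSoup(resp.text, 'html.parser')
print(soup.title.text)  # sanity check that the quote page loaded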
You may want to try the code below:
def search_stock():
    driver = webdriver.Chrome(r'./chromedriver')
    driver.maximize_window()
    driver.implicitly_wait(30)
    driver.get('https://www.moneycontrol.com/')
    wait = WebDriverWait(driver, 10)
    time.sleep(5)
    try:
        ActionChains(driver).move_to_element(wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "input[id='search_str']")))).perform()
        search_box = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "input[id='search_str']")))
        print("Element is visible? ", search_box.is_displayed())
        time.sleep(10)
        if search_box.is_displayed():
            search_box.send_keys('Zomato')
            search_box.send_keys(Keys.RETURN)
    except NoSuchElementException:
        print("Element not found")
Imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
Try clicking on search_box first, and only then send text to it.
search_box = driver.find_element_by_xpath('//form[@id="form_topsearch"]//input[@id="search_str"]')
search_box.click()
time.sleep(0.1)
search_box.send_keys('Zomato')
search_box.send_keys(Keys.RETURN)
Additionally, I would advise using the explicit waits of expected conditions instead of hardcoded sleeps.
With them your code will be faster and more reliable.
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def search_stock():
    driver = webdriver.Chrome(r'./chromedriver')
    wait = WebDriverWait(driver, 20)
    driver.get('https://www.moneycontrol.com/')
    wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="fixedheader"]/div[4]/span'))).click()
    search_box = wait.until(EC.element_to_be_clickable((By.XPATH, '//form[@id="form_topsearch"]//input[@id="search_str"]')))
    search_box.send_keys('Zomato')
    search_box.send_keys(Keys.RETURN)
    # I'm not sure you should close the driver immediately after invoking the search...
    # driver.close()

search_stock()
UPD
Let's try this
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains

def search_stock():
    driver = webdriver.Chrome(r'./chromedriver')
    wait = WebDriverWait(driver, 20)
    actions = ActionChains(driver)
    driver.get('https://www.moneycontrol.com/')
    search_icon = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="fixedheader"]/div[4]/span')))
    time.sleep(0.5)
    driver.execute_script("arguments[0].scrollIntoView();", search_icon)
    driver.execute_script("arguments[0].click();", search_icon)
    search_box = wait.until(EC.presence_of_element_located((By.XPATH, '//form[@id="form_topsearch"]//input[@id="search_str"]')))
    driver.execute_script("arguments[0].scrollIntoView();", search_box)
    driver.execute_script("arguments[0].click();", search_box)
    time.sleep(0.5)
    search_box.send_keys('Zomato')
    search_box.send_keys(Keys.RETURN)
    # I'm not sure you should close the driver immediately after invoking the search...
    # driver.close()

search_stock()
If the above solution is still not working, then instead of
actions.move_to_element(search_box).click().perform()
try
driver.execute_script("arguments[0].click();", search_box)
I'm trying to scrape a website with a show more button, and I'm not able to click on it.
The website is: https://www.wtatennis.com/rankings/singles
And my code is:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver import ActionChains
from tqdm import tqdm
import time
options = Options()
options.add_argument("--headless")
browser = webdriver.Chrome(ChromeDriverManager().install(),options=options)
browser.get('https://www.wtatennis.com/rankings/singles')
action = ActionChains(browser)
showmore = browser.find_elements_by_xpath(".//button[contains(@class, 'btn widget-footer__more-button rankings__show-more js-show-more-button')]")
action.move_to_element(showmore).perform()
showmore.click()
time.sleep(5)
Does anyone have any idea? Thanks!
Don't use './/' in your locator when you are starting the search from the root: since there is no current element, your locator won't find any element. Also, you can use any attribute that identifies the element uniquely. See the code below:
browser = webdriver.Chrome(options=options)
browser.get('https://www.wtatennis.com/rankings/singles')
WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH,
    '//*[@data-text="Accept Cookies"]'))).click()
WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH,
    '//*[@data-text="Show More"]'))).click()
Use WebDriverWait and data attributes.
To use the wait, import:
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
To wait till all elements are loaded, you have to make sure the last element is not changing; if it is changing, keep scrolling.
browser.get('https://www.wtatennis.com/rankings/singles')
WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH,
    '//*[@data-text="Accept Cookies"]'))).click()
value = "start"
WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH,
    '//*[@data-text="Show More"]'))).click()
# keep scrolling until the text of the last ranking row stops changing
while browser.find_element_by_xpath("(//tr[@class='rankings__row'])[last()]").text != value:
    elem = browser.find_element_by_xpath('(//*[contains(text(),"Loading")])[2]')
    value = browser.find_element_by_xpath("(//tr[@class='rankings__row'])[last()]").text
    browser.execute_script("arguments[0].scrollIntoView()", elem)
    WebDriverWait(browser, 10).until(EC.presence_of_all_elements_located((By.XPATH,
        "//tr[@class='rankings__row']")))
    try:
        WebDriverWait(browser, 10).until_not(EC.text_to_be_present_in_element((By.XPATH,
            "(//tr[@class='rankings__row'])[last()]"), value))
    except:
        pass
from selenium import webdriver
import time
from bs4 import BeautifulSoup
import csv
import tkinter as tk
from tkinter import filedialog
import re
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
with open(file_path, 'r') as f:
    urls = f.readlines()
for item in urls:
    driver = webdriver.Chrome('C:/util/chromedriver_win32/chromedriver.exe')
    driver.get(item)
    time.sleep(3)
    element = driver.find_element_by_xpath("//button[contains(.,'Show all')]").click()
Website being used: https://www.dell.com/support/home/en-us/product-support/product/precision-15-5520-laptop/drivers
I am trying to count the number of buttons with a particular class and then click them all based upon that count (added a picture of one):
class='details-control js-details-toggle pointer-cursor btn collapse-toggle table-collapse-toggle js-collapse-toggle collapsed'
I got this to work, but it skips some of the buttons:
element2 = driver.find_elements_by_xpath("//button[@class='details-control js-details-toggle pointer-cursor btn collapse-toggle table-collapse-toggle js-collapse-toggle collapsed']")
for x in range(0, len(element2)):
    if element2[x].is_displayed():
        element2[x].click()
Give this a go:
WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, "//button[contains(.,'Show all')]"))).click()
my_buttons = driver.find_elements_by_xpath("//button[@class='details-control js-details-toggle pointer-cursor btn collapse-toggle table-collapse-toggle js-collapse-toggle collapsed']")
print(f"Found {len(my_buttons)} buttons")
for button in my_buttons:
    driver.execute_script("arguments[0].scrollIntoView()", button)
    button.click()
You will need these imports for WebDriverWait (which is the preferred option over time.sleep):
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
Working in Python and Selenium on Windows 7, I want to click the showcomment button on this web page once it becomes clickable (handled by a wait), and then click the 'Show more comments' button to load more comments so I can scrape them. After the first button, I am able to extract comments.
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import selenium.common.exceptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import selenium.webdriver.support.ui as UI
driver = webdriver.Firefox()
driver.get("http://www.aljazeera.com/news/2016/07/erdogan-west-mind-business-160729205425215.html")
wait = UI.WebDriverWait(driver, 10)
next_page_link = wait.until(
    EC.element_to_be_clickable((By.ID, 'showcomment')))
next_page_link.click()
wait = UI.WebDriverWait(driver, 20)
next_page_link2 = wait.until(
    EC.element_to_be_clickable((By.LINK_TEXT, 'Show more comments')))
next_page_link2.click()
v = driver.find_elements_by_class_name('gig-comment-body')
print(v)
for h in v:
    print(h.text)
But the second button cannot be clicked; instead it throws the exception:
selenium.common.exceptions.TimeoutException
What is the problem?
I think you should try using execute_script() to perform the click, as below:
next_page_link2 = wait.until(
    EC.element_to_be_clickable((By.XPATH, '//div[contains(text(),"Show more comments")]')))
# now click this using execute_script
driver.execute_script("arguments[0].click()", next_page_link2)
Hope it helps...:)