Selenium attribute error "'WebDriver' object has no attribute '_timeout'" - selenium-chromedriver

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from io import SEEK_END
import time
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
Path = "C:\Program Files (x86)\chromedriver.exe"
opt = Options()
opt.add_argument('--disable-blink-features=AutomationControlled')
opt.add_argument('--start-maximized')
opt.add_experimental_option("prefs", {
"profile.default_content_setting_values.media_stream_mic": 1,
"profile.default_content_setting_values.media_stream_camera": 1,
"profile.default_content_setting_values.geolocation": 0,
"profile.default_content_setting_values.notifications": 2
})
driver = webdriver.Chrome(options=opt , executable_path= Path)
with open("data2.txt", "r+") as f:
f.seek(0 , SEEK_END)
if f.tell() == 0:
MAILID = f.write("\n" + input("Enter your name_roll no (javets_6890) : "))
PASSWORD = f.write("\n" + input("Enter the password for your mail id : "))
else:
f.seek(0)
datalst = []
for i in f:
datalst.append(i.replace("\n",""))
print(datalst)
MAILID = datalst[0]
PASSWORD = datalst[1]
def methclass():
driver.get("https://classroom.google.com/u/0/h") #https://meet.google.com/lookup/bdzjc4fsl4?authuser=0&hs=179
wait = WebDriverWait.until(driver , 30)
try:
wait.until(EC.presence_of_element_located((By.NAME , "identifier")))
finally:
mailbox = driver.find_element_by_name("identifier") #looks for the mail id box in the sign in page
mailbox.send_keys(MAILID) #sends this text into the box for mail id
mailbox.send_keys(Keys.ENTER)
try:
wait.until(EC.presence_of_element_located((By.NAME , "password")))
finally:
passbox = driver.find_element_by_name("password") #looks for mail pass box
passbox.send_keys(PASSWORD) #sends pass to box
passbox.send_keys(Keys.ENTER)
time.sleep(10)
driver.get("https://meet.google.com/lookup/bdzjc4fsl4")
wait = WebDriverWait(driver , 180)
try:
wait.until(
EC.presence_of_element_located((By.CLASS_NAME, "Fxmcue"))) # sees whether join button is present or not
driver.refresh()
finally:
body = driver.find_element_by_xpath("//body")
body.send_keys(Keys.LEFT_CONTROL , "e")
body.send_keys(Keys.LEFT_CONTROL , "d")
join = driver.find_element_by_class_name("Fxmcue")
join.click()
time.sleep(2400)
driver.quit()
methclass()
How do I fix the attribute error?
The code ran fine until i added waits and try except blocks , i used time.sleep() instead of waits and the code ran just fine.
........................................................................................................................................................................................................................................................................................................................................................................................................

Your instantiation of wait object is wrong.
Instead of
wait = WebDriverWait.until(driver , 30)
use
wait = WebDriverWait(driver, 30)

Related

The PyQt5 Application Stop completely

I have a problem here with my Pyqt5 app when I try to add the user input to mysql database in the Add_car_info function. When I press the button to add the info the app stop working and gives me this error message. tested on windows.
here is the code:
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.uic import loadUiType
from PyQt5 import QtWidgets, QtGui
import threading
import sys
import os
from os import path
import mysql.connector
import MySQLdb
#get the ui file
FORM_CLASS,_=loadUiType(path.join(path.dirname(__file__),"main.ui"))
class MainApp(QMainWindow, FORM_CLASS):
def __init__(self, parent=None):
super(MainApp,self).__init__(parent)
QMainWindow.__init__(self)
self.setupUi(self)
self.Handel_Ui()
self.Handel_DB_Connect()
self.Handel_Buttons()
self.Handel_DB_Connect()
def Handel_Ui(self):
self.setWindowTitle('Car_System')
self.tabWidget.tabBar().setVisible(False)
def Handel_DB_Connect(self):
db = mysql.connector.connect(database='mydb',
host='localhost',
user='root',
port='3309',
password='toor')
self.cur = db.cursor()
QApplication.processEvents()
def Handel_Buttons(self):
self.pushButton.clicked.connect(self.Add_car_info)
self.pushButton_2.clicked.connect(self.Update_car_info)
self.pushButton_3.clicked.connect(self.Delete_car_info)
self.pushButton_4.clicked.connect(self.Add_fuel_info)
self.pushButton_9.clicked.connect(self.Update_fuel_info)
self.pushButton_5.clicked.connect(self.Add_maintenance_info)
self.pushButton_6.clicked.connect(self.Update_maintenance_info)
self.pushButton_7.clicked.connect(self.Add_Licence_info)
self.pushButton_8.clicked.connect(self.Update_Licence_info)
self.pushButton_17.clicked.connect(self.Add_Revenus_info)
self.pushButton_18.clicked.connect(self.Update_Revenus_info)
self.pushButton_19.clicked.connect(self.Add_Rents_info)
self.pushButton_20.clicked.connect(self.Update_Rents_info)
self.pushButton_34.clicked.connect(self.Add_elewater_info)
self.pushButton_33.clicked.connect(self.Update_elewater_info)
def Add_car_info(self):
car_number = self.lineEdit_12.text()
owner_company = self.lineEdit_10.text()
branch = self.lineEdit_8.text()
service_mode = self.comboBox.currentIndex()
shaceh_number = self.lineEdit_4.text()
motor_number = self.lineEdit_2.text()
fuel_type = self.comboBox_2.currentIndex()
car_type = self.lineEdit_11.text()
car_model = self.lineEdit_9.text()
car_load = self.lineEdit_7.text()
car_wight = self.lineEdit_5.text
car_shape = self.lineEdit_3.text()
car_color = self.lineEdit.text()
self.cur.execute('''INSERT INTO car_info(car_number, owner_company, branch, service_mode, shaceh_number, motor_number, fuel_type, car_type, car_model, car_load, car_weight, car_shape, car_color)'
VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)''' , (car_number,owner_company,branch,service_mode,shaceh_number,motor_number,fuel_type,car_type,car_model,car_load,car_wight,car_shape,car_color))
QApplication.processEvents()
print('Done')
def Update_car_info(self):
pass
def Delete_car_info(self):
pass
def Add_fuel_info(self):
pass
def Update_fuel_info(self):
pass
def Add_maintenance_info(self):
pass
def Update_maintenance_info(self):
pass
def Add_Licence_info(self):
pass
def Update_Licence_info(self):
pass
def Add_Revenus_info(self):
pass
def Update_Revenus_info(self):
pass
def Add_Rents_info(self):
pass
def Update_Rents_info(self):
pass
def Add_elewater_info(self):
pass
def Update_elewater_info(self):
pass
def main():
app = QApplication(sys.argv)
window = MainApp()
window.show()
app.exec_()
if __name__ == '__main__':
main()
thank you for your attantion and have a good day.

The Tensorflow Object_detection API 's visualize don't work

when I am using the API of Object_detection,I followed the instruction ,everything is fine .However ,when I begin to test my picture , I met a problem , it seems that the function named
" visualize_boxes_and_labels_on_image_array " ( in the 57 line ) didn't work . Here is my source codes
import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
class TOD(object):
def __init__(self):
self.PATH_TO_CKPT = '/home/xiyou/Desktop/ssd_training/result/frozen_inference_graph.pb'
self.PATH_TO_LABELS = '/home/xiyou/Desktop/ssd_training/detection_for_smoke.pbtxt'
self.NUM_CLASSES = 1
self.detection_graph = self._load_model()
self.category_index = self._load_label_map()
def _load_model(self):
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
def _load_label_map(self):
label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map,
max_num_classes=self.NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
def detect(self, image):
with self.detection_graph.as_default():
with tf.Session(graph=self.detection_graph) as sess:
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image, axis=0)
image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
print(boxes, scores, classes, num_detections)
#print(np.squeeze(boxes))
# Visualization of the results of a detection.
#######Here is the problem
# image1 = vis_util.visualize_boxes_and_labels_on_image_array(
image, #######Here is the problem
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
self.category_index,
use_normalized_coordinates=True,
line_thickness=50,
)
#print(np.squeeze(boxes),np.squeeze(classes))
cv2.namedWindow("detection")
cv2.imshow("detection", image1)
cv2.waitKey(0)
if __name__ == '__main__':
image = cv2.imread('/home/xiyou/Pictures/timg1.jpg')
detecotr = TOD()
detecotr.detect(image)
when I run this code , the image did show ,but nothing changed , no detected area in the pic and no an other informations . the input pic is the same as the out image . But when I was Debug , I found the Varibles such as soucres , classes , boxes do have values.
Is anyone can help me ? Thanks!!!
And my Tensorflow version is 1.4.0 , CUDA 8.0 in Ubuntu 16.04

Selenium in python is giving error

i have written a code in selenium using chrome driver, it codes works fine on some days and some days it gives error.Below is my code:
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
import os
import time
import csv
driver = webdriver.Chrome("chromedriver.exe")
driver.get('https://maharerait.mahaonline.gov.in/searchlist/searchlist')
# try:
# element = WebDriverWait(driver, 100).until(
# EC.presence_of_element_located((By.ID, "Promoter"))
# )
# finally:
# print('0000000000000000000000')
# driver.quit()
time.sleep(1)
driver.find_element_by_id('Promoter').click()
divisionLength = len(Select(driver.find_element_by_id('Division')).options)
print('*********{}'.format(divisionLength))
firstRow = 0
titleRow = []
contentRows = []
gdistName = ""
gdivName = ""
for divisionElement in range(1,divisionLength):
selectDivision = Select(driver.find_element_by_id('Division'))
selectDivision.options
selectDivision.select_by_index(divisionElement)
time.sleep(1)
districtLength =
len(Select(driver.find_element_by_id('District')).options)
gdivName = (selectDivision.options)[divisionElement].text
while districtLength == 1:
print("43")
print(districtLength)
for districtElement in range(1,districtLength):
selectDistrict = Select(driver.find_element_by_id('District'))
selectDistrict.options
selectDistrict.select_by_index(districtElement)
gdistName = (selectDistrict.options)[districtElement].text
time.sleep(2)
projectLength =
len(Select(driver.find_element_by_id('Project')).options)
print('/------------------------------/')
print('/-----project number: {}-------/'.format(projectLength))
print('/------------------------------/')
if projectLength == 1:
continue
for projectElement in range(1,projectLength):
selectDistrict = Select(driver.find_element_by_id('District'))
selectDistrict.select_by_index(0)
selectDistrict.select_by_index(districtElement)
time.sleep(2)
gdistName = (selectDistrict.options)[districtElement].text
# selectProject.options
# while len(selectProject.options) == 1:
# print(len(selectProject.options))
# print("65")
# c = len(select.options)
# print('---------------{}'.format(c))
# titleRow = []
# contentRows = []
# firstRow = 0
# for i in range(1,c):
# select = Select(driver.find_element_by_id('Project'))
# while len(select.options) == 1:
# pass
selectProject = Select(driver.find_element_by_id('Project'))
time.sleep(2)
selectProject.select_by_index(projectElement)
driver.find_element_by_id('btnSearch').click()
tableRows =
driver.find_element_by_class_name('table').find_elements_by_tag_name('tr')
if firstRow == 0:
headRow = tableRows[0].find_elements_by_tag_name('th')
for headRowData in range(0,len(headRow)):
text =
headRow[headRowData].find_element_by_tag_name('span').text
titleRow.append(text)
firstRow = firstRow + 1
for dataRowsNumbers in range(1,len(tableRows)):
dataRow =
tableRows[dataRowsNumbers].find_elements_by_tag_name('td')
tempList = []
for dataRowContents in range(0,len(dataRow)):
try:
a_link =
dataRow[dataRowContents].find_element_by_tag_name('a').get_attribute('href')
tempList.append(str(a_link))
except NoSuchElementException:
tempList.append(str(dataRow[dataRowContents].text))
# if dataRow[dataRowContents].text == 'View':
# a_link =
dataRow[dataRowContents].find_element_by_tag_name('a').get_attribute('href')
# tempList.append(str(a_link))
# else:
#
tempList.append(str(dataRow[dataRowContents].text))
#print(dataRow[dataRowContents].text)
tempList.append(gdivName)
tempList.append(gdistName)
print(tempList)
contentRows.append(tempList)
# print('Automated check is over')
# print('Stored data in programs is as below:')
# print(contentRows)
with open("./data.csv",'w') as csvfile:
csvfile = csv.writer(csvfile, delimiter=',')
csvfile.writerow(titleRow)
csvfile.writerow("")
for i in range(0,len(contentRows)):
csvfile.writerow(contentRows[i])
driver.close()
Please excuse of intended spaces.
so i receive this error when i run it..
Traceback (most recent call last):
File "C:\Users\prince.bhatia\Desktop\Crawlers\Maha_Rera1.py", line 68, in
<module>
selectDistrict.select_by_index(districtElement)
File
"C:\Users\prince.bhatia\AppData\Local\Programs\Python\Python36\lib\site-
packages\selenium\webdriver\support\select.py", line 103, in select_by_index
raise NoSuchElementException("Could not locate element with index %d" %
index)
selenium.common.exceptions.NoSuchElementException: Message: Could not locate
element with index 2
Please , if someone can suggest me what to change , because it worked fine yesterday and not it is not working..It requires chrome driver to run
this is the website: https://maharerait.mahaonline.gov.in/searchlist/searchlist
try using select.select_by_value(districtElement) instead of index

Retrieving MySQL with Kivy

I have a Kivy code, where the output is:
I want to get replace the Box No. with strings retrieved from MySQL
So far I have tried to implement the MySQL to the python script:
class RemoveScreen(MyLayout):
def __init__(self,**kwargs):
db = MySQLdb.connect("localhost", "root", "[PASSWORD]", "tcs_microrage_crm")
cursor=db.cursor()
self.var = StringVar()
self.label1 = Label(self, text=0, textvariable=self.var)
myvar=str(self.var)
#http://stackoverflow.com/questions/775296/python-mysql-parameterized-queries
cursor.execute("SELECT part_name FROM stock_lists WHERE part_number = %s", (myvar))
self.myvar=StringVar()
self.myvar.set(cursor.fetchone())
self.label2 = Label(self, text=0, textvariable=myvar)
But this didn't work.
Q: How can I do MySQL queries and print individual strings in the kv file.
To show you how you could do that, I made a little search example.
This searches for fruit names in the database, and will output its name and price to the table.
from kivy.app import App
import MySQLdb
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
class DbCon:
def __init__(self):
self.db = MySQLdb.connect(user="root",passwd="pw",db="kivy")
self.c = self.db.cursor()
def get_rows(self,search = ""):
self.c.execute("SELECT * FROM fruit WHERE name REGEXP '.*%s.*' LIMIT 3" % search)
return self.c.fetchall()
class Table(BoxLayout):
def __init__(self,**kwargs):
super(Table,self).__init__(**kwargs)
self.orientation = "vertical"
self.search_field = BoxLayout(orientation="horizontal")
self.search_input = TextInput(text='search',multiline=False)
self.search_button = Button(text="search",on_press=self.search)
self.search_field.add_widget(self.search_input)
self.search_field.add_widget(self.search_button)
self.add_widget(self.search_field)
self.add_widget(Label(text="table"))
self.table = GridLayout(cols=2,rows=4)
self.table.add_widget(Label(text="Fruit"))
self.table.add_widget(Label(text="Price"))
self.rows = [[Label(text="item"),Label(text="price")],
[Label(text="item"),Label(text="price")],
[Label(text="item"),Label(text="price")]]
for item,price in self.rows:
self.table.add_widget(item)
self.table.add_widget(price)
self.add_widget(self.table)
self.db = DbCon()
self.update_table()
def update_table(self,search=""):
for index,row in enumerate(self.db.get_rows(search)):
self.rows[index][0].text = row[1]
self.rows[index][1].text = str(row[2])
def clear_table(self):
for index in range(3):
self.rows[index][0].text = ""
self.rows[index][1].text = ""
def search(self, *args):
self.clear_table()
self.update_table(self.search_input.text)
class MyApp(App):
def build(self):
return Table()
MyApp().run()

Storing scraped data with SCRAPY in MySQL database

i'm new here, it's the first time i'm using scrapy and i really need help. I know that this was asked before and i did try a lot of solutions but none of them works.
My pipelines file:
import sys
import MySQLdb
import hashlib
from scrapy.exceptions import NotConfigured
from scrapy.exceptions import DropItem
from scrapy.http import Request
from projetpfe.items import ProjetpfeItem
class MySQLStorePipeline(object):
def __init__(self):
try:
self.conn= MySQLdb.connect(user='root', passwd='root123', host='localhost', db='pressebam', use_unicode=True, charset='utf8')
self.cursor = self.conn.cursor()
self.cursor.execute("CREATE TABLE IF NOT EXISTS scrapeddata2( idscrapedData INT NOT NULL AUTO_INCREMENT PRIMARY KEY, nomOrganePresse VARCHAR(200), titreArticle VARCHAR(200), url VARCHAR(200), nomJournaliste VARCHAR(200), jour VARCHAR(100), annee VARCHAR(100), categorie VARCHAR(100), contenuArticle VARCHAR(5000), lienImage VARCHAR(200)) ")
self.conn.commit()
except (AttributeError, MySQLdb.OperationalError), e:
raise e
def process_item(self, item, spider):
try:
self.cursor.execute( "INSERT INTO scrapeddata2 ( nomOrganePresse, titreArticle, url, jour, contenuArticle, lienImage) VALUES (%s, %s, %s,%s,%s, %s)",
(item['OrganePresse'],
item['Titre'],
item['URL'],
item['Jour'],
item['Contenu'],
item['LienImage'] ))
self.conn.commit()
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
return item
And this is my spider file
import urlparse
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from projetpfe.items import ProjetpfeItem
class ProjetpfeSpider(CrawlSpider):
name = 'telquel'
start_urls = ['http://telquel.ma'] # urls from which the spider will start crawling
rules = [Rule(SgmlLinkExtractor(allow=[r'page/\d+']), follow=True),
# r'page/\d+' : regular expression for http://telquelpage/X URLs
Rule(SgmlLinkExtractor(allow=[r'\d{4}/\d{2}/\d{2}/\w+']), callback='parse_telquel')]
# r'\d{4}/\d{2}/\w+' : regular expression for http://telquel.ma/YYYY/MM/title URLs
def parse_telquel(self, response):
hxs = HtmlXPathSelector(response)
item = ProjetpfeItem()
# XPath selector for title
item['Titre'] = hxs.select("//h1[#class='article-title']/text()").extract()
item['LienImage'] = hxs.select("//div[#class='main-article-content']//img[#class='setborder']/#src").extract()
item['OrganePresse'] = hxs.select("//img[#class='logo']/#alt").extract()
item['Jour'] = hxs.select("//div[#class='calendar-date']/text()").extract()
item['Contenu'] = hxs.select("//div[#class='shortcode-content']").extract()
item['URL'] = hxs.select("/html/head/link[5]/#href").extract()
return item
This is the settings file
BOT_NAME = 'projetpfe'
SPIDER_MODULES = ['projetpfe.spiders']
NEWSPIDER_MODULE = 'projetpfe.spiders'
ITEM_PIPELINES = {'projetpfe.pipelines.MySQLStorePipeline' : 300}
and finally my items
from scrapy.item import Item, Field
class ProjetpfeItem(Item):
OrganePresse = Field()
Titre = Field()
Journaliste = Field()
Jour = Field()
Annee = Field()
Categorie = Field()
Contenu = Field()
LienImage = Field()
URL = Field()
So the spider works fine but nada is stored in the database. HELP!!!