NameError: name 'Logi' is not defined with Sikuli

I'm just trying out Sikuli. I'm trying to have a "main" script that calls other files containing some classes and some defs. But I get the following error:
[error] Stopped
[error] An error occurred at line 13
[error] Error message:
Traceback (most recent call last):
  File "C:\Users\gregory\AppData\Local\Temp\sikuli-tmp2607956245912033896.py", line 13, in <module>
    log = Logi()
NameError: name 'Logi' is not defined
I don't really know why.
My Code
main.sikuli
# Path to def
myScriptPath = "C:\\NOT_SCANNED\\Stockage\\SikuliProject\\"
if not myScriptPath in sys.path: sys.path.append(myScriptPath)

# Import File.sikuli
from loginLogout import *
from sikuli.Sikuli import *

# Call Def
if __name__ == "__main__":
    log = Logi()
    log.login()
    log.logout()
loginLogout.sikuli
from sikuli.Sikuli import *

class Logi:
    def login(self):
        openApp("MyApp")
        wait(5)
        type("demo" + Key.TAB + "demo" + Key.TAB)
        type("a", KEY_CTRL)
        type("localhost")
        click( )  # the image targets shown in the Sikuli IDE do not survive as plain text
        wait(5)
        wait( )
I noticed something: if I name my class foo, it works. I don't really understand why.
Thanks in advance for your help.
OK, I found something. I did something wrong when naming my files. The problem seems to have been fixed by doing a Save As, and I no longer get that error. But I have another one: now I don't get any error when I execute it, but nothing is executed...
New code:
main
# -*-coding:Latin-1 -*
# Path to def
myScriptPath = "C:\\NOT_SCANNED\\Stockage\\SikuliProject"
if not myScriptPath in sys.path: sys.path.append(myScriptPath)

# Import File.sikuli
from sikuli.Sikuli import *
from Logi import *

# Call Def
if __name__ == "__main__":
    log = Logi()
    log.login
Logi
from sikuli.Sikuli import *

class Logi:
    def login(self):
        openApp("MYAPP")
        wait(5)
        type("demo" + Key.TAB + "demo" + Key.TAB)
        type("a", KEY_CTRL)
        type("localhost")
        click( )
        wait(5)
        wait( )

    def logout(self):
        click( )
        wait( )
        click( )

    def openNewTab(self):
        click( )

    def createNewSingle(self):
        click( )
        click( )
        rightClick( )
        click( )
        click( )
        wait( )
        click( )
        type("test")
        click( )
        type("this is a test with Sikuli")
        click( )
        rightClick( )
        click( )
        click( )
        wait( )
        click( )
Thanks in advance for your help :)

You have added the script path but not imported the script itself. In your main, add:
import Logi
reload(Logi)
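A hedged sketch of how the main could then look (assuming the helper file is named Logi.sikuli, as above). Note also that log.login on its own only references the method; log.login() with parentheses actually calls it, which would explain why nothing is executed:

# main.sikuli - sketch under the assumptions above
myScriptPath = "C:\\NOT_SCANNED\\Stockage\\SikuliProject"
if not myScriptPath in sys.path:
    sys.path.append(myScriptPath)

import Logi           # load the script module itself
reload(Logi)          # pick up edits to Logi without restarting the IDE
from Logi import *    # bring the Logi class into the current namespace

if __name__ == "__main__":
    log = Logi()
    log.login()       # parentheses are required to actually run the method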

Related

The PyQt5 application stops completely

I have a problem with my PyQt5 app: when I try to add the user input to a MySQL database in the Add_car_info function and press the button to add the info, the app stops working and gives me an error message. Tested on Windows.
Here is the code:
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.uic import loadUiType
from PyQt5 import QtWidgets, QtGui
import threading
import sys
import os
from os import path
import mysql.connector
import MySQLdb

# get the ui file
FORM_CLASS, _ = loadUiType(path.join(path.dirname(__file__), "main.ui"))

class MainApp(QMainWindow, FORM_CLASS):
    def __init__(self, parent=None):
        super(MainApp, self).__init__(parent)
        QMainWindow.__init__(self)
        self.setupUi(self)
        self.Handel_Ui()
        self.Handel_DB_Connect()
        self.Handel_Buttons()
        self.Handel_DB_Connect()

    def Handel_Ui(self):
        self.setWindowTitle('Car_System')
        self.tabWidget.tabBar().setVisible(False)

    def Handel_DB_Connect(self):
        db = mysql.connector.connect(database='mydb',
                                     host='localhost',
                                     user='root',
                                     port='3309',
                                     password='toor')
        self.cur = db.cursor()
        QApplication.processEvents()

    def Handel_Buttons(self):
        self.pushButton.clicked.connect(self.Add_car_info)
        self.pushButton_2.clicked.connect(self.Update_car_info)
        self.pushButton_3.clicked.connect(self.Delete_car_info)
        self.pushButton_4.clicked.connect(self.Add_fuel_info)
        self.pushButton_9.clicked.connect(self.Update_fuel_info)
        self.pushButton_5.clicked.connect(self.Add_maintenance_info)
        self.pushButton_6.clicked.connect(self.Update_maintenance_info)
        self.pushButton_7.clicked.connect(self.Add_Licence_info)
        self.pushButton_8.clicked.connect(self.Update_Licence_info)
        self.pushButton_17.clicked.connect(self.Add_Revenus_info)
        self.pushButton_18.clicked.connect(self.Update_Revenus_info)
        self.pushButton_19.clicked.connect(self.Add_Rents_info)
        self.pushButton_20.clicked.connect(self.Update_Rents_info)
        self.pushButton_34.clicked.connect(self.Add_elewater_info)
        self.pushButton_33.clicked.connect(self.Update_elewater_info)
    def Add_car_info(self):
        car_number = self.lineEdit_12.text()
        owner_company = self.lineEdit_10.text()
        branch = self.lineEdit_8.text()
        service_mode = self.comboBox.currentIndex()
        shaceh_number = self.lineEdit_4.text()
        motor_number = self.lineEdit_2.text()
        fuel_type = self.comboBox_2.currentIndex()
        car_type = self.lineEdit_11.text()
        car_model = self.lineEdit_9.text()
        car_load = self.lineEdit_7.text()
        car_wight = self.lineEdit_5.text  # note: missing parentheses, so this stores the method itself, not its text
        car_shape = self.lineEdit_3.text()
        car_color = self.lineEdit.text()
        # note: the stray quote after car_color) on the next line breaks the SQL syntax
        self.cur.execute('''INSERT INTO car_info(car_number, owner_company, branch, service_mode, shaceh_number, motor_number, fuel_type, car_type, car_model, car_load, car_weight, car_shape, car_color)'
                         VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)''', (car_number, owner_company, branch, service_mode, shaceh_number, motor_number, fuel_type, car_type, car_model, car_load, car_wight, car_shape, car_color))
        QApplication.processEvents()
        print('Done')
    def Update_car_info(self):
        pass

    def Delete_car_info(self):
        pass

    def Add_fuel_info(self):
        pass

    def Update_fuel_info(self):
        pass

    def Add_maintenance_info(self):
        pass

    def Update_maintenance_info(self):
        pass

    def Add_Licence_info(self):
        pass

    def Update_Licence_info(self):
        pass

    def Add_Revenus_info(self):
        pass

    def Update_Revenus_info(self):
        pass

    def Add_Rents_info(self):
        pass

    def Update_Rents_info(self):
        pass

    def Add_elewater_info(self):
        pass

    def Update_elewater_info(self):
        pass

def main():
    app = QApplication(sys.argv)
    window = MainApp()
    window.show()
    app.exec_()

if __name__ == '__main__':
    main()
Thank you for your attention, and have a good day.

The TensorFlow object_detection API's visualization doesn't work

When using the object_detection API, I followed the instructions and everything was fine. However, when I began to test my picture, I ran into a problem: it seems that the function named "visualize_boxes_and_labels_on_image_array" (on line 57) doesn't work. Here is my source code:
import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

class TOD(object):
    def __init__(self):
        self.PATH_TO_CKPT = '/home/xiyou/Desktop/ssd_training/result/frozen_inference_graph.pb'
        self.PATH_TO_LABELS = '/home/xiyou/Desktop/ssd_training/detection_for_smoke.pbtxt'
        self.NUM_CLASSES = 1
        self.detection_graph = self._load_model()
        self.category_index = self._load_label_map()

    def _load_model(self):
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        return detection_graph

    def _load_label_map(self):
        label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
        categories = label_map_util.convert_label_map_to_categories(label_map,
                                                                    max_num_classes=self.NUM_CLASSES,
                                                                    use_display_name=True)
        category_index = label_map_util.create_category_index(categories)
        return category_index

    def detect(self, image):
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph) as sess:
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image, axis=0)
                image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
                boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
                scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
                classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                print(boxes, scores, classes, num_detections)
                #print(np.squeeze(boxes))
                # Visualization of the results of a detection.
                ####### Here is the problem
                image1 = vis_util.visualize_boxes_and_labels_on_image_array(
                    image,  ####### Here is the problem
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    self.category_index,
                    use_normalized_coordinates=True,
                    line_thickness=50,
                )
                #print(np.squeeze(boxes), np.squeeze(classes))
                cv2.namedWindow("detection")
                cv2.imshow("detection", image1)
                cv2.waitKey(0)
if __name__ == '__main__':
    image = cv2.imread('/home/xiyou/Pictures/timg1.jpg')
    detector = TOD()
    detector.detect(image)
When I run this code, the image does show, but nothing has changed: there is no detected area in the picture and no other information. The output picture is the same as the input. But when I was debugging, I found that variables such as scores, classes, and boxes do have values.
Can anyone help me? Thanks!!!
My TensorFlow version is 1.4.0, with CUDA 8.0 on Ubuntu 16.04.

Does IOError: [Errno 2] No such file or directory: mean the file hasn't been written?

I'm using Tweepy for the first time and am currently getting this error:
---------------------------------------------------------------------------
IOError Traceback (most recent call last)
<ipython-input-11-cdd7ebe0c00f> in <module>()
----> 1 data_json = io.open('raw_tweets.json', mode='r', encoding='utf-8').read() #reads in the JSON file
2 data_python = json.loads(data_json)
3
4 csv_out = io.open('tweets_out_utf8.csv', mode='w', encoding='utf-8') #opens csv file
IOError: [Errno 2] No such file or directory: 'raw_tweets.json'
I've got a feeling that the code I've written isn't working. For example, print(status) doesn't print anything. Also, I see no saved CSV or JSON file in the directory.
I'm a newbie so any help/documentation you can offer would be great!
import time
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import os
import json
import csv
import io
from pymongo import MongoClient

ckey = 'blah'
consumer_secret = 'blah'
access_token_key = 'blah'
access_token_secret = 'blah'

#start_time = time.time() #grabs the system time
keyword_list = ['keyword'] #track list

#Listener Class Override
class listener(StreamListener):
    def __init__(self, start_time, time_limit=60):
        self.time = start_time
        self.limit = time_limit
        self.tweet_data = []

    def on_data(self, data):
        saveFile = io.open('raw_tweets.json', 'a', encoding='utf-8')
        while (time.time() - self.time) < self.limit:
            try:
                self.tweet_data.append(data)
                return True
            except BaseException, e:
                print 'failed ondata,', str(e)
                time.sleep(5)
                pass
        saveFile = io.open('raw_tweets.json', 'w', encoding='utf-8')
        saveFile.write(u'[\n')
        saveFile.write(','.join(self.tweet_data))
        saveFile.write(u'\n]')
        saveFile.close()
        exit()

    def on_error(self, status):
        print status

class listener(StreamListener):
    def __init__(self, start_time, time_limit=10):
        self.time = start_time
        self.limit = time_limit

    def on_data(self, data):
        while (time.time() - self.time) < self.limit:
            print(data)
            try:
                client = MongoClient('blah', 27017)
                db = client['blah']
                collection = db['blah']
                tweet = json.loads(data)
                collection.insert(tweet)
                return True
            except BaseException as e:
                print('failed ondata,')
                print(str(e))
                time.sleep(5)
                pass
        exit()

    def on_error(self, status):
        print(status)

data_json = io.open('raw_tweets.json', mode='r', encoding='utf-8').read() #reads in the JSON file
data_python = json.loads(data_json)
csv_out = io.open('tweets_out_utf8.csv', mode='w', encoding='utf-8') #opens csv file
UPDATED: this creates the file, but the file is empty.
import tweepy
import datetime

auth = tweepy.OAuthHandler('xxx', 'xxx')
auth.set_access_token('xxx', 'xxx')

class listener(tweepy.StreamListener):
    def __init__(self, timeout, file_name, *args, **kwargs):
        super(listener, self).__init__(*args, **kwargs)
        self.start_time = None
        self.timeout = timeout
        self.file_name = file_name
        self.tweet_data = []

    def on_data(self, data):
        if self.start_time is None:
            self.start_time = datetime.datetime.now()
        while (datetime.datetime.now() - self.start_time).seconds < self.timeout:
            with open(self.file_name, 'a') as data_file:
                data_file.write('\n')
                data_file.write(data)

    def on_error(self, status):
        print status

l = listener(60, 'stack_raw_tweets.json')
mstream = tweepy.Stream(auth=auth, listener=l)
mstream.filter(track=['python'], async=True)
You are not creating a Stream for the listener. The second-to-last line of the code below does that; after that you have to start the stream, which the last line does. I must warn you that storing this in MongoDB is the right thing to do, as the file I am storing seems to grow easily to several GB. Also, the file is not exactly JSON: each line of the file is a JSON object. You must tweak it to your needs.
import tweepy
import datetime

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)

class listener(tweepy.StreamListener):
    def __init__(self, timeout, file_name, *args, **kwargs):
        super(listener, self).__init__(*args, **kwargs)
        self.start_time = None
        self.timeout = timeout
        self.file_name = file_name
        self.tweet_data = []

    def on_data(self, data):
        if self.start_time is None:
            self.start_time = datetime.datetime.now()
        while (datetime.datetime.now() - self.start_time).seconds < self.timeout:
            with open(self.file_name, 'a') as data_file:
                data_file.write('\n')
                data_file.write(data)

    def on_error(self, status):
        print status

l = listener(60, 'raw_tweets.json')
mstream = tweepy.Stream(auth=auth, listener=l)
mstream.filter(track=['python'], async=True)
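Since each line of the resulting file is a separate JSON object rather than one JSON array, the original read step (a single json.loads over the whole file) will not work on it. A minimal sketch of reading the file back line by line, assuming the file name used above:

import io
import json

tweets = []
with io.open('raw_tweets.json', mode='r', encoding='utf-8') as data_file:
    for line in data_file:
        line = line.strip()
        if line:  # skip the blank separator lines written before each tweet
            tweets.append(json.loads(line))
print(len(tweets))  # number of tweets collected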

Storing data scraped with Scrapy in a MySQL database

I'm new here; it's the first time I'm using Scrapy and I really need help. I know this was asked before, and I tried a lot of solutions, but none of them works.
My pipelines file:
import sys
import MySQLdb
import hashlib
from scrapy.exceptions import NotConfigured
from scrapy.exceptions import DropItem
from scrapy.http import Request
from projetpfe.items import ProjetpfeItem

class MySQLStorePipeline(object):
    def __init__(self):
        try:
            self.conn = MySQLdb.connect(user='root', passwd='root123', host='localhost', db='pressebam', use_unicode=True, charset='utf8')
            self.cursor = self.conn.cursor()
            self.cursor.execute("CREATE TABLE IF NOT EXISTS scrapeddata2( idscrapedData INT NOT NULL AUTO_INCREMENT PRIMARY KEY, nomOrganePresse VARCHAR(200), titreArticle VARCHAR(200), url VARCHAR(200), nomJournaliste VARCHAR(200), jour VARCHAR(100), annee VARCHAR(100), categorie VARCHAR(100), contenuArticle VARCHAR(5000), lienImage VARCHAR(200)) ")
            self.conn.commit()
        except (AttributeError, MySQLdb.OperationalError), e:
            raise e

    def process_item(self, item, spider):
        try:
            self.cursor.execute("INSERT INTO scrapeddata2 ( nomOrganePresse, titreArticle, url, jour, contenuArticle, lienImage) VALUES (%s, %s, %s, %s, %s, %s)",
                                (item['OrganePresse'],
                                 item['Titre'],
                                 item['URL'],
                                 item['Jour'],
                                 item['Contenu'],
                                 item['LienImage']))
            self.conn.commit()
        except MySQLdb.Error, e:
            print "Error %d: %s" % (e.args[0], e.args[1])
        return item
And this is my spider file
import urlparse
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from projetpfe.items import ProjetpfeItem

class ProjetpfeSpider(CrawlSpider):
    name = 'telquel'
    start_urls = ['http://telquel.ma']  # urls from which the spider will start crawling
    rules = [Rule(SgmlLinkExtractor(allow=[r'page/\d+']), follow=True),
             # r'page/\d+' : regular expression for http://telquel.ma/page/X URLs
             Rule(SgmlLinkExtractor(allow=[r'\d{4}/\d{2}/\d{2}/\w+']), callback='parse_telquel')]
             # r'\d{4}/\d{2}/\d{2}/\w+' : regular expression for http://telquel.ma/YYYY/MM/DD/title URLs

    def parse_telquel(self, response):
        hxs = HtmlXPathSelector(response)
        item = ProjetpfeItem()
        # XPath selector for title
        item['Titre'] = hxs.select("//h1[@class='article-title']/text()").extract()
        item['LienImage'] = hxs.select("//div[@class='main-article-content']//img[@class='setborder']/@src").extract()
        item['OrganePresse'] = hxs.select("//img[@class='logo']/@alt").extract()
        item['Jour'] = hxs.select("//div[@class='calendar-date']/text()").extract()
        item['Contenu'] = hxs.select("//div[@class='shortcode-content']").extract()
        item['URL'] = hxs.select("/html/head/link[5]/@href").extract()
        return item
This is the settings file
BOT_NAME = 'projetpfe'
SPIDER_MODULES = ['projetpfe.spiders']
NEWSPIDER_MODULE = 'projetpfe.spiders'
ITEM_PIPELINES = {'projetpfe.pipelines.MySQLStorePipeline' : 300}
And finally, my items file:
from scrapy.item import Item, Field

class ProjetpfeItem(Item):
    OrganePresse = Field()
    Titre = Field()
    Journaliste = Field()
    Jour = Field()
    Annee = Field()
    Categorie = Field()
    Contenu = Field()
    LienImage = Field()
    URL = Field()
So the spider works fine, but nothing is stored in the database. HELP!!!

Writing items to a MySQL database in Scrapy

I am new to Scrapy. I have this spider code:
class Example_spider(BaseSpider):
    name = "example"
    allowed_domains = ["www.example.com"]

    def start_requests(self):
        yield self.make_requests_from_url("http://www.example.com/bookstore/new")

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        urls = hxs.select('//div[@class="bookListingBookTitle"]/a/@href').extract()
        for i in urls:
            yield Request(urljoin("http://www.example.com/", i[1:]), callback=self.parse_url)

    def parse_url(self, response):
        hxs = HtmlXPathSelector(response)
        main = hxs.select('//div[@id="bookshelf-bg"]')
        items = []
        for i in main:
            item = Exampleitem()
            item['book_name'] = i.select('div[@class="slickwrap full"]/div[@id="bookstore_detail"]/div[@class="book_listing clearfix"]/div[@class="bookstore_right"]/div[@class="title_and_byline"]/p[@class="book_title"]/text()')[0].extract()
            item['price'] = i.select('div[@id="book-sidebar-modules"]/div[@class="add_to_cart_wrapper slickshadow"]/div[@class="panes"]/div[@class="pane clearfix"]/div[@class="inner"]/div[@class="add_to_cart 0"]/form/div[@class="line-item"]/div[@class="line-item-price"]/text()').extract()
            items.append(item)
        return items
And the pipeline code is:
class examplePipeline(object):
    def __init__(self):
        self.dbpool = adbapi.ConnectionPool('MySQLdb',
                                            db='blurb',
                                            user='root',
                                            passwd='redhat',
                                            cursorclass=MySQLdb.cursors.DictCursor,
                                            charset='utf8',
                                            use_unicode=True)

    def process_item(self, spider, item):
        # run db query in thread pool
        assert isinstance(item, Exampleitem)
        query = self.dbpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self.handle_error)
        return item

    def _conditional_insert(self, tx, item):
        print "db connected-=========>"
        # create record if doesn't exist.
        tx.execute("select * from example_book_store where book_name = %s", (item['book_name']))
        result = tx.fetchone()
        if result:
            log.msg("Item already stored in db: %s" % item, level=log.DEBUG)
        else:
            tx.execute("""INSERT INTO example_book_store (book_name, price)
                          VALUES (%s, %s)""",
                       (item['book_name'], item['price']))
            log.msg("Item stored in db: %s" % item, level=log.DEBUG)

    def handle_error(self, e):
        log.err(e)
After running this, I am getting the following error:
exceptions.NameError: global name 'Exampleitem' is not defined
I got the above error when I added the line below to the process_item method:
assert isinstance(item, Exampleitem)
Without this line I get:
exceptions.TypeError: 'Example_spider' object is not subscriptable
Can anyone make this code run and make sure all the items are saved into the database?
Try the following code in your pipeline
import sys
import MySQLdb
import hashlib
from scrapy.exceptions import DropItem
from scrapy.http import Request

class MySQLStorePipeline(object):
    def __init__(self):
        self.conn = MySQLdb.connect('host', 'user', 'passwd',
                                    'dbname', charset="utf8",
                                    use_unicode=True)
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        try:
            self.cursor.execute("""INSERT INTO example_book_store (book_name, price)
                                   VALUES (%s, %s)""",
                                (item['book_name'].encode('utf-8'),
                                 item['price'].encode('utf-8')))
            self.conn.commit()
        except MySQLdb.Error, e:
            print "Error %d: %s" % (e.args[0], e.args[1])
        return item
Your process_item method should be declared as def process_item(self, item, spider): instead of def process_item(self, spider, item): since you switched the arguments around.
This exception, exceptions.NameError: global name 'Exampleitem' is not defined, indicates you didn't import Exampleitem in your pipeline.
Try adding: from myspiders.myitems import Exampleitem (with the correct names/paths, of course).
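Putting both fixes together, a minimal sketch of the corrected pipeline skeleton (the module path myspiders.myitems is a placeholder; adjust it to your project layout):

from myspiders.myitems import Exampleitem  # placeholder path, adjust to your project

class examplePipeline(object):
    def process_item(self, item, spider):  # note the order: item first, then spider
        assert isinstance(item, Exampleitem)
        # ... run the database insert as before ...
        return item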
I think this way is better and more concise:
# Item
class pictureItem(scrapy.Item):
    topic_id = scrapy.Field()
    url = scrapy.Field()

# SQL
self.save_picture = "insert into picture(`url`, `id`) values(%(url)s, %(id)s);"

# usage
cur.execute(self.save_picture, dict(item))

It's just like:

cur.execute("insert into picture(`url`,`id`) values(%(url)s,%(id)s)" % {"url": someurl, "id": 1})
The reason this works (you can read more about Items in the Scrapy docs) is that the Field class is just an alias to the built-in dict class and doesn't provide any extra functionality or attributes. In other words, Field objects are plain old Python dicts.
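A small sketch illustrating the point, reusing the pictureItem class from above (the values are made up for the example): the item converts cleanly to a plain dict, which is what lets cur.execute fill the named %(...)s parameters from dict(item).

item = pictureItem(topic_id=1, url='http://example.com/a.png')
print(dict(item))  # prints {'topic_id': 1, 'url': 'http://example.com/a.png'}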