Locust/Selenium - Running in headless mode the execution stops after user logs in - selenium-chromedriver

I'm running a configuration with Locust and realbrowserlocusts, and with one particular application the execution just stops after the assigned user process logs into the application in headless mode. In browser mode the execution continues with no issues. The full logging in the terminal window shows the user logging in, in its correct state.
I don't have the same issue with a different application using the same approach, so I'm really puzzled by it.
My first thought was that a redirect was occurring and the webdriver was possibly losing context with the window, so I added some code: self.client.switch_to.default_content(), which didn't make any difference. I took out a wait.until function call on the next line and placed a wait timer instead, and that didn't make any difference either, as the next object call failed to execute.
self.client.wait.until(EC.visibility_of_element_located((By.ID, "wwe-workspace-tab-0")))
Here are my current Chrome options:
def __init__(self):
    super(HeadlessChromeLocust, self).__init__()
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument("--disable-gpu")
    options.add_argument("--disable-crash-reporter")
    options.add_argument("--disable-extensions")
    options.add_argument("--disable-in-process-stack-traces")
    options.add_argument("--disable-logging")
    # options.add_argument("--log-level=4")
    options.add_argument("--output=/dev/null")
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    options.add_argument('--auth-server-whitelist=*')
from realbrowserlocusts import HeadlessChromeLocust
from realbrowserlocusts import ChromeLocust
from selenium.webdriver.common.by import By
# from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.support import expected_conditions as EC
from locust import TaskSet, task
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import ElementNotInteractableException
from datetime import datetime
import win32com.client
import time
import calendar
import logging, sys
import csv
usernames = [
    "LoadTest_Agent001",
    "LoadTest_Agent002",
    "LoadTest_Agent003",
    "LoadTest_Agent004",
    "LoadTest_Agent005",
    "LoadTest_Agent006",
    "LoadTest_Agent007",
    "LoadTest_Agent010",
    "LoadTest_Agent009",
    "LoadTest_Agent008"
]
SHORT_SLEEP = 5
MEDIUM_SLEEP = 10
LONG_SLEEP = 20
class Omnichat(TaskSet):
    # self.message = ''
    def on_start(self):
        self.message = ''
        if len(usernames) > 0:
            self.username = usernames.pop()
        print(self.username)
        self.client.delete_all_cookies()

    def open_agent_homepage(self):
        print("SitePage")
        # Load the site page
        self.client.get("https://myhost/ui/ad/v1/index.html")
        time.sleep(2)
        print("SitePage check for element on site page")
        self.client.wait.until(EC.visibility_of_element_located((By.ID, "Layer_2")), "Site page timeout")

    def open_agent_login(self):
        self.client.wait.until(EC.visibility_of_element_located((By.ID, "wweLoginUserNameField")), "Login Field")
        # Enter username
        self.client.find_element_by_id("wweLoginUserNameField").click()
        self.client.find_element_by_id("wweLoginUserNameField").clear()
        self.client.find_element_by_id("wweLoginUserNameField").send_keys(self.username)
        self.client.find_element_by_id("wweLoginSignInButton").click()
        print("looking for My workspace after login")
        # self.client.wait.until(EC.visibility_of_element_located((By.XPATH, "//a[@id='wwe-workspace-tab-0']/span")))
        # FAILS TO MOVE PAST THIS POINT
        self.client.wait.until(EC.visibility_of_element_located((By.ID, "wwe-workspace-tab-0")))
        print("Found the workspace text")
        # self.client.wait.until(EC.visibility_of_element_located((By.XPATH, "//a[@id='wwe-workspace-tab-0']/span")))

I did not simulate this on my machine, but just a suggestion: from a quick read through some articles I learned that the default window size for Chrome in headless mode can be too small, 800x600 (not fully maximized), so elements that are visible in browser mode may sit outside the viewport and never become "visible" to the wait. You can try amending your Chrome options to launch headless Chrome with an explicit window size.
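For example (a minimal sketch against the options block you already have; the 1920x1080 value is an assumption, adjust as needed):
options.add_argument('--headless')
options.add_argument('--window-size=1920,1080')  # override the 800x600 headless default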
Reference:
https://forum.katalon.com/t/headless-element-not-visible/7217/3
https://itnext.io/how-to-run-a-headless-chrome-browser-in-selenium-webdriver-c5521bc12bf0

Related

Selenium Python XPATH element unable to locate element

What am I doing wrong? The XPATH is correct, but it says no element was found. I also tried with find_elements(By.XPATH, "/html/body/div[3]/div[3]/div[5]/div[1]/table[*]/tbody/tr[*]/td[1]/a").
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
driver = webdriver.Chrome(r'C:\\Users\\<Username>\\Downloads\\chromedriver_win32 (5)\\chromedriver.exe')
# rows = driver.find_elements_by_xpath("/html/body/div[3]/div[3]/div[5]/div[1]/table[*]/tbody/tr[*]/td[1]/a")
driver.get("https://www.census2011.co.in/data/subdistrict/5542-bangalore-north-bangalore-karnataka.html")
# time.sleep(15)
# rows = driver.find_element(By.XPATH, '/html/body/div[2]/div/div[1]/div[2]/table/tbody/tr[*]/td[2]/a')
rows = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "/html/body/div[2]/div/div[1]/div[2]/table/tbody/tr[*]/td[2]/a")))
print(rows)
data = [row.text.strip() for row in rows]
print(*data, sep = "\n")
# driver.close()
Try this,
rows = driver.find_elements(By.XPATH, "//table[contains(@class, 'table table-striped table-hover')]/tbody/tr/td/a")
[row.get_attribute("href") for row in rows]
Output -
['https://www.census2011.co.in/data/town/803162-bbmp-karnataka.html',
'https://www.census2011.co.in/data/town/612950-chikkabanavara-karnataka.html',
'https://www.census2011.co.in/data/town/612949-hunasamaranahalli-karnataka.html',
'https://www.census2011.co.in/data/town/612951-madanaiyakanahalli-karnataka.html',
'https://www.census2011.co.in/data/town/612952-chikkabidarakallu-karnataka.html',
'https://www.census2011.co.in/data/town/612948-kadigenahalli-karnataka.html',
'https://www.census2011.co.in/data/village/612779-adakamaranahalli-karnataka.html',
....]
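For what it's worth, my reading of why the original snippet failed (not verified against this page): EC.presence_of_element_located resolves to a single WebElement, so the list comprehension iterates over the wrong thing. If you want to wait for the whole set of links, presence_of_all_elements_located returns a list:
rows = WebDriverWait(driver, 10).until(
    EC.presence_of_all_elements_located(
        (By.XPATH, "//table[contains(@class, 'table table-striped table-hover')]/tbody/tr/td/a")
    )
)
data = [row.text.strip() for row in rows]  # now rows really is a list of elements
print(*data, sep="\n")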

Import reflected Flask-SQLAlchemy Module before creating the app

This is a continuation of this question.
As my Flask app should not write anything to my database, I set up Flask-SQLAlchemy to reflect my database. This way I do not have to change my models when I change my schema:
# app/__init__.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

def create_app():
    app = Flask(__name__)
    db.init_app(app)
    with app.app_context():
        db.Model.metadata.reflect(db.engine)
    return app

# app/models.py
from app import db

class Data(db.Model):
    __table__ = db.Model.metadata.tables['data']
But now, if I import the model before creating the app, I run into errors because the metadata is not set yet. This is a problem when it comes to testing, for example:
# test.py
import unittest
from app import create_app, db
from app.models import Data

class TestGUI(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.app = create_app()
    # etc ...
This throws KeyError: 'data' in __table__ = db.Model.metadata.tables['data'] when importing from app.models, because the metadata is not set before the create_app() function is run.
I did find a solution (thanks to @snakecharmerb): simply sidestep the problem by not importing app.models before running create_app(). A bit hacky, so feel free to post an answer as well if you have a better solution.
My test file now looks like this:
# test.py
import unittest
from app import create_app, db

app = create_app()
from app.models import Data

class TestGUI(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.app = app
    # etc ...
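For what it's worth, a possibly cleaner route (an untested sketch; it assumes SQLAlchemy's DeferredReflection mixin cooperates with Flask-SQLAlchemy's db.Model, which I have not verified) is to defer the reflection instead of reordering the imports:
# app/__init__.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import DeferredReflection

db = SQLAlchemy()

def create_app():
    app = Flask(__name__)
    db.init_app(app)
    with app.app_context():
        from app import models  # make sure the model classes are defined first
        DeferredReflection.prepare(db.engine)  # reflect tables into all deferred models
    return app

# app/models.py
from app import db
from sqlalchemy.ext.declarative import DeferredReflection

class Data(DeferredReflection, db.Model):
    __tablename__ = 'data'  # columns are filled in later by prepare()
With this, app.models can be imported at any time; only prepare() needs a live engine.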

I don't know what to do now with the problem: invalid syntax (<unknown>, line 20) pylint(syntax-error)

I'm trying to build my own desktop assistant and I've got a problem with the first line. I checked whether I have an extra space or line, but everything looks OK. Could you please check if anything is wrong?
I've added my script in progress below. Thank you all!
import speech_recognition as sr
import os
import sys
import re
import webbrowser
import smtplib
import requests
import subprocess
from pyowm import OWM
import youtube_dl
import vlc
import urllib
import urllib2
import json
from bs4 import BeautifulSoup as soup
from urllib2 import urlopen
import wikipedia
import random
from time import strftime
def sofiaResponse (audio);
    "speaks audio passed as argument"
    print(audio)
    for line in audio.splitlines():
        os.system("say" + audio)

def myCommand ():
    "listens for commands"
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print('Say something...')
        r.pause_threshold = 1
        r.adjust_for_ambient_noise(source, duration=1)
        audio = r.listen(source)
    try:
        command = r.recognize_google(audio).lower()
        print('You said: ' + command + '\n')
        # loop back to continue listening
    except sr.UnknownValueError:
        print('Error, help me error')
        command = myCommand();
    return command

def assistant (command):
    "if statements for executing commands"
On line 22, you made a typo:
def sofiaResponse (audio);
should be
def sofiaResponse (audio):

Scrapy / Pipeline not inserting data to MySQL database

I'm making a pipeline in Scrapy to store scraped data in a MySQL database. When the spider is run in the terminal it works perfectly; even the pipeline is opened. However, the data is not being sent to the database. Any help appreciated! :)
Here's the pipeline code:
import sys
import MySQLdb
import hashlib
from scrapy.exceptions import DropItem
from scrapy.http import Request
from tutorial.items import TutorialItem
class MySQLTest(object):
    def __init__(self):
        db = MySQLdb.connect(user='root', passwd='', host='localhost', db='python')
        cursor = db.cursor()

    def process_item(self, spider, item):
        try:
            cursor.execute("INSERT INTO info (venue, datez) VALUES (%s, %s)", (item['artist'], item['date']))
            self.conn.commit()
        except MySQLdb.Error, e:
            print "Error %d: %s" % (e.args[0], e.args[1])
        return item
And here's the spider code:
import scrapy  # Import required libraries.
from scrapy.selector import HtmlXPathSelector  # Allows for path detection in a website's code.
from scrapy.spider import BaseSpider  # Used to create a simple spider to extract data.
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor  # Needed for the extraction of href links in HTML to crawl further pages.
from scrapy.contrib.spiders import CrawlSpider  # Needed to make the crawl spider.
from scrapy.contrib.spiders import Rule  # Allows specified rules to affect what the link
import spotipy
import soundcloud
import mysql.connector
from tutorial.items import TutorialItem

class AllGigsSpider(CrawlSpider):
    name = "allGigs"  # Name of the Spider. In command prompt, when in the correct folder, enter "scrapy crawl allGigs".
    allowed_domains = ["www.allgigs.co.uk"]  # Allowed domains is a String NOT a URL.
    start_urls = [
        "http://www.allgigs.co.uk/whats_on/London/clubbing-1.html",
        "http://www.allgigs.co.uk/whats_on/London/festivals-1.html",
        "http://www.allgigs.co.uk/whats_on/London/comedy-1.html",
        "http://www.allgigs.co.uk/whats_on/London/theatre_and_opera-1.html",
        "http://www.allgigs.co.uk/whats_on/London/dance_and_ballet-1.html"
    ]  # Specify the starting points for the web crawler.
    rules = [
        Rule(SgmlLinkExtractor(restrict_xpaths='//div[@class="more"]'),  # Search the start URLs for
             callback="parse_me",
             follow=True),
    ]

    def parse_me(self, response):
        for info in response.xpath('//div[@class="entry vevent"]|//div[@class="resultbox"]'):
            item = TutorialItem()  # Extract items from the items folder.
            item['artist'] = info.xpath('.//span[@class="summary"]//text()').extract()  # Extract artist information.
            item['date'] = info.xpath('.//span[@class="dates"]//text()').extract()  # Extract date information.
            # item['endDate'] = info.xpath('.//abbr[@class="dtend"]//text()').extract()  # Extract end date information.
            # item['startDate'] = info.xpath('.//abbr[@class="dtstart"]//text()').extract()  # Extract start date information.
            item['genre'] = info.xpath('.//div[@class="header"]//text()').extract()
            yield item  # Retrieve items in item.
            client = soundcloud.Client(client_id='401c04a7271e93baee8633483510e263')
            tracks = client.get('/tracks', limit=1, license='cc-by-sa', q=item['artist'])
            for track in tracks:
                print(tracks)
I believe the problem was in my settings.py file, where I had missed a comma... yawn.
ITEM_PIPELINES = {
    'tutorial.pipelines.MySQLTest': 300,
}
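Even with the settings fixed, the pipeline as posted would still fail at runtime: db and cursor are locals in __init__, process_item's arguments are reversed (Scrapy passes item first, then spider), and self.conn is never assigned. A corrected sketch under those assumptions, keeping the same table and columns:
import MySQLdb

class MySQLTest(object):
    def __init__(self):
        # Keep the connection and cursor on self so process_item can reach them.
        self.conn = MySQLdb.connect(user='root', passwd='', host='localhost', db='python')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):  # Scrapy calls this as (item, spider)
        try:
            self.cursor.execute(
                "INSERT INTO info (venue, datez) VALUES (%s, %s)",
                (item['artist'], item['date'])
            )
            self.conn.commit()
        except MySQLdb.Error as e:
            print("Error %d: %s" % (e.args[0], e.args[1]))
        return item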

Not able to login by Chrome browser via Selenium Python Code

I have to open a website where I have to provide the login ID and password.
I wrote the code in Selenium using Python, but I'm not able to log in: the script fails to fill the password box, whereas the username box is filled correctly.
Below is the code. The same code works on IE and Firefox.
from selenium import webdriver
import selenium.webdriver.chrome.webdriver
from selenium.webdriver import Chrome as Browser
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import unittest, time, re
class LoginAa(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        self.base_url = "https://hostname.com"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_login_aa(self):
        driver = self.driver
        driver.get("https://hostname.com")
        time.sleep(5)
        self.assertEqual("User Login", driver.title)
        # driver.find_element_by_id("IDToken1").clear()
        driver.find_element_by_id("IDToken1").send_keys("abcd")
        time.sleep(5)
        driver.find_element_by_id("IDToken2").clear()
        driver.find_element_by_id("IDToken2").send_keys("qwerty")
        # driver.find_element_by_css_selector("input[type='password']").click()