I am having trouble obtaining the href links for the next pages of the URL. I have gotten as far as pulling out all of the text the tag contains, but I can't figure out how to discard the text I don't need, extract just the href, and navigate through the pages.
Here is my code:
import requests
from bs4 import BeautifulSoup
import webbrowser
import time
jobsearch = input("What type of job?: ")
location = input("What is your location: ")
url = ("https://ca.indeed.com/jobs?q=" + jobsearch + "&l=" + location)
base_url = 'https://ca.indeed.com/'
r = requests.get(url)
rcontent = r.content
prettify = BeautifulSoup(rcontent, "html.parser")
filter_words = ['engineering', 'instrumentation', 'QA']
all_job_url = []
nextpages = []
filtered_job_links = []
http_flinks = []
flinks = []
def all_next_pages():
    pages = prettify.find_all('div', {'class':'pagination'})
    for next_page in pages:
        next_page.find_all('a')
        nextpages.append(next_page)
        print(next_page)

all_next_pages()
Here is a way to get the links of the search result items: find the 'row result' class and then find the a tag inside it; that tag contains all the information you need.
import requests
from bs4 import BeautifulSoup
import webbrowser
import time
jobsearch = input("What type of job?: ")
location = input("What is your location: ")
url = ("https://ca.indeed.com/jobs?q=" + jobsearch + "&l=" + location)
base_url = 'https://ca.indeed.com/'
r = requests.get(url)
rcontent = r.text
prettify = BeautifulSoup(rcontent, "lxml")
filter_words = ['engineering', 'instrumentation', 'QA']
all_job_url = []
nextpages = []
filtered_job_links = []
http_flinks = []
flinks = []
def all_next_pages():
    pages = prettify.find_all('div', {'class':' row result'})
    for next_page in pages:
        info = next_page.find('a')
        url = info.get('href')
        title = info.get('title')
        print(title, url)

all_next_pages()
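If what you actually need are the next-page links rather than the result links, here is a minimal sketch building on the same variables (prettify, base_url, nextpages). It assumes the pagination block is still a div with class 'pagination' whose a tags carry relative hrefs; the function name collect_next_pages is just for illustration.

from urllib.parse import urljoin

def collect_next_pages():
    # Assumes Indeed still renders <div class="pagination"> containing <a> tags
    # whose href attributes are relative paths such as /jobs?q=...&start=10.
    pages = prettify.find_all('div', {'class': 'pagination'})
    for pagination in pages:
        for link in pagination.find_all('a', href=True):
            next_url = urljoin(base_url, link['href'])  # build an absolute URL
            nextpages.append(next_url)
            print(next_url)

collect_next_pages()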
import datetime
import time
from datetime import date
from random import randint

import requests
import schedule
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager


class gmarket_sales():
    def __init__(self):
        chrome_driver = Service(ChromeDriverManager().install())
        options = Options()
        options.add_experimental_option('detach', True)
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        # options.add_argument('--headless')
        # options.add_argument('--window-size = x, y')
        # options.add_argument('--start-maximized')
        # options.add_argument('--start-fullscreen')
        # options.add_argument('--mute-audio')
        self.driver = webdriver.Chrome(options=options, service=chrome_driver)
        self.now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S (%a)')
        self.hour = datetime.datetime.now().strftime('%H시_%M_분')
        self.today = date.today()
        self.folder = None
        self.today_file = None
        self.kakao_talk = kakao()  # custom KakaoTalk notification class (defined elsewhere)

    def connect(self):
        url = 'http://minishop.gmarket.co.kr/meritblog'
        # url = 'http://minishop.gmarket.co.kr/hanvitis'
        self.driver.get(url)
        return url

    def shopping_mall(self):
        mall_name = self.driver.find_element(By.CSS_SELECTOR, 'a.shop_title_ui_txt').text
        self.folder = f'./메리트몰_데이터베이스/지마켓'
        self.today_file = f'{self.today}_{mall_name}_지마켓.json'
        return mall_name

    def soup(self, url_param):
        try:
            response = requests.get(url_param)
            if response.status_code == 200:
                sp = BeautifulSoup(response.text, 'html.parser')
                return sp
        except requests.packages.urllib3.exceptions.MaxRetryError as e:
            print(str(e))

    def total_product(self):
        total_items = 0
        products = self.driver.find_element(By.ID, 'ulCategory').find_elements(By.CSS_SELECTOR, 'span.data_num')
        for product in products:
            items = int(product.text.replace('(', "").replace(')', ""))
            total_items += items
        # e.g. 391 items in total
        return total_items

    def paging(self, total_items, url):
        page_list = []
        # click "view all products"
        self.driver.execute_script('arguments[0].click();', self.driver.find_element(By.CSS_SELECTOR, '.allList_view > a'))
        time.sleep(2)
        # number of products shown per page
        view_limit = int(self.driver.find_element(By.CSS_SELECTOR, 'div.limit').text.replace("개씩", ""))
        # work out the number of pages
        if total_items % view_limit == 0:
            page = total_items // view_limit
        else:
            page = total_items // view_limit + 1
        # build the list of page URLs
        for cnt in range(page):
            page_url = f'{url}/List?CategoryType=General&SortType=FocusRank&DisplayType=SmallImage&Page={cnt+1}&PageSize=60'
            page_list.append(page_url)
        # self.driver.quit()
        return page_list

    def data_one(self, page_list):
        """Product URL list and a dict of regular price / sale price / discount rate."""
        url_list = []
        price_dic = {}
        for page in page_list:
            html = self.soup(page)
            for items in html.find('ul', class_='type2').find_all('li'):
                # url
                item_url = items.find('a')['href']
                # product code
                item_code = item_url[-10:]
                # price and discount rate
                if items.find('p', class_='prd_price').find('span', class_='del_important'):
                    original_price = items.find('p', class_='prd_price').find('span', class_='del_important').text.replace("원", "")
                    discount_price = items.find('p', class_='prd_price').find('strong').text.replace("원", "")
                    sale_rate = items.find('p', class_='prd_price').find('span', class_='splt_ico usr_clr').text
                else:
                    original_price = items.find('p', class_='prd_price').find('strong').text.replace("원", "")
                    discount_price = "없음"
                    sale_rate = "없음"
                url_list.append(item_url)
                price_dic[item_code] = {"정상가": original_price, "할인가": discount_price, "할인율": sale_rate}
            time.sleep(randint(1, 10))
        self.driver.quit()
        return url_list, price_dic

    def check_start(self):
        url = self.connect()
        mall_name = self.shopping_mall()
        total_items = self.total_product()
        page_list = self.paging(total_items, url)
        url_list, price_dic = self.data_one(page_list)


if __name__ == "__main__":
    g_market = gmarket_sales()
    # g_market.check_start()
    schedule.every().hour.at(":20").do(g_market.check_start)
    while True:
        schedule.run_pending()
        time.sleep(1)
Hello, I am a student practicing web page crawling.
I'm writing code that scrapes data by parsing a website with Selenium, and I set the program up to run at regular intervals using the schedule module.
However, if the Chrome driver is initialized in the class's __init__ and driver.quit() is called during the run, urllib3.exceptions.MaxRetryError (raise MaxRetryError(_pool, url, error or ResponseError(cause))) is raised when the scheduled job executes a second time.
The code I wrote is shown above.
I would really appreciate it if you could point out any problems.
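One likely cause, going by this description: the driver is created once in __init__, but data_one ends with self.driver.quit(), so the second scheduled run talks to a browser session that no longer exists and the local connection to chromedriver fails with MaxRetryError. Below is a minimal, self-contained sketch of a pattern that avoids this by creating (and quitting) a fresh driver inside the scheduled job on every run; the URL is the one from the snippet above and the scraping body is reduced to a placeholder.

import time

import schedule
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager

def scheduled_job():
    # Build a fresh driver for every run so a quit() at the end of one run
    # cannot invalidate the session used by the next run.
    options = Options()
    options.add_experimental_option('excludeSwitches', ['enable-logging'])
    driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()),
                              options=options)
    try:
        driver.get('http://minishop.gmarket.co.kr/meritblog')
        print(driver.title)  # placeholder for the actual scraping steps
    finally:
        driver.quit()  # safe here: the next run builds its own driver

schedule.every().hour.at(":20").do(scheduled_job)
while True:
    schedule.run_pending()
    time.sleep(1)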
I'm trying to scrape data, and it works fine if the {fplid} in the URL is hard-coded, e.g. 30. How do I fix this method so that it takes the user's input and fetches the data from the URL without a decode error? This is the traceback:
'''
C:\Users\krish\OneDrive\Desktop\FPLHangout\scrape\views.py, line 31, in home
    data = get_html_content(fplid) …
C:\Users\krish\OneDrive\Desktop\FPLHangout\scrape\views.py, line 9, in get_html_content
    managerdata = json.loads(r.text)
'''
import json
import requests
from django.shortcuts import render

def get_html_content(fplid):
    url = 'https://fantasy.premierleague.com/api/entry/{fplid}/event/30/picks/'
    r = requests.get(url)
    managerdata = json.loads(r.text)
    bootstrap = 'https://fantasy.premierleague.com/api/bootstrap-static/'
    bootstrapdata = requests.get(bootstrap)
    bootstrapjson = json.loads(bootstrapdata.text)
    for pick in managerdata['picks']:
        pick = (pick['element'])  # correct id
        location = 0
        for player in bootstrapjson['elements']:
            if player.get('id') == pick:
                break
            location += 1
        # position = (pick['position'])
        firstname = bootstrapjson['elements'][location]['first_name']
        secondname = bootstrapjson['elements'][location]['second_name']
        return firstname + " " + secondname

def home(request):
    if 'fplid' in request.GET:
        fplid = request.GET.get('fplid')
        data = get_html_content(fplid)
        return render(request, 'scrape/home.html', {'fpldata': data})
    return render(request, 'scrape/home.html')
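A minimal sketch of the likely fix, assuming the decode error is a JSONDecodeError caused by the URL above never interpolating {fplid} (the string is not an f-string), so the API returns a non-JSON error page. The helper name get_manager_picks is just for illustration.

import json
import requests

def get_manager_picks(fplid):
    # Same endpoint as above, but with fplid actually substituted via an
    # f-string and a status check before decoding the response body.
    url = f'https://fantasy.premierleague.com/api/entry/{fplid}/event/30/picks/'
    r = requests.get(url)
    if r.status_code != 200:
        return None  # e.g. an unknown entry id
    return json.loads(r.text)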
I'm trying to scrape HTML with Beautiful Soup, but I'm having difficulty extracting the data I want.
I wrote the following, but this way I get every script tag in the HTML:
response = s.get(dataObject.url)
soup = BeautifulSoup(response.text, 'html.parser')
script = soup.findAll('script')
I need to take the 'pf_id' value from this HTML:
<script>
var qSVariables = ""; // Store Variables
var prodzoom_control = 'on';
var siteSubDirectory = "";
var addbsktTimeElapseVal = "1000";
var pageidentifie = 'product';
var promoSliderHours = "720";
var isKnockout = true;
var product_model_wears_lable = 'Model is wearing';
$(document).ready(function(){
var myVariantGridEM = new variantGrid('myVariantGridEM'); // Load the variant grid class
variants = { "40387759":{'sequence':'','opt2_sequence':'991','wasprice':'RRP €107.50','wasprice_net':'RRP €0.00','price':'€107.50','price_net':'€107.50','sale_item':'false','manufacturer_sku':'','manufacturer_name':'Jordan','option1':'White / University Red / White','option2':'6','option3':'','pf_id':'4038775','sell':'true','stock_status':'in stock','option2_js':'Size 6','option2_mob_js':'Size: 6','stock_int':1,'lead_text':'','promoicon':'<img src=\"/images/articles/icon_freedelivery_large.png\"
</script>
You can split the string on "," and then look for 'pf_id'. Once you find it, split that piece on ":" and take the second value.
from bs4 import BeautifulSoup
data = """<script>
var qSVariables = ""; // Store Variables
var prodzoom_control = 'on';
var siteSubDirectory = "";
var addbsktTimeElapseVal = "1000";
var pageidentifie = 'product';
var promoSliderHours = "720";
var isKnockout = true
var product_model_wears_lable = 'Model is wearing';
$(document).ready(function(){
var myVariantGridEM = new variantGrid('myVariantGridEM'); // Load the variant grid class
variants = { "40387759":{'sequence':'','opt2_sequence':'991','wasprice':'RRP €107.50','wasprice_net':'RRP €0.00','price':'€107.50','price_net':'€107.50','sale_item':'false','manufacturer_sku':'','manufacturer_name':'Jordan','option1':'White / University Red / White','option2':'6','option3':'','pf_id':'4038775','sell':'true','stock_status':'in stock','option2_js':'Size 6','option2_mob_js':'Size: 6','stock_int':1,'lead_text':'','promoicon':'<img src=\"/images/articles/icon_freedelivery_large.png\"
</script>
"""
soup = BeautifulSoup(data, "html.parser")
script = soup.find("script")
splited_text = str(script).split(',')
result = ""
for i in range(len(splited_text)):
    if "pf_id" in splited_text[i]:
        result = splited_text[i].split(":")[1]
        break
print(result)
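An alternative sketch that keys directly on the 'pf_id' name with a regular expression instead of relying on comma positions; data is the same HTML string defined above.

import re
from bs4 import BeautifulSoup

soup = BeautifulSoup(data, "html.parser")
script_text = soup.find("script").get_text()
# Look for 'pf_id':'...' anywhere in the inline script.
match = re.search(r"'pf_id'\s*:\s*'([^']*)'", script_text)
if match:
    print(match.group(1))  # 4038775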
I am trying to scrape a website with multiple pages that are rendered by JavaScript. I am using BeautifulSoup and Selenium. I have a script that works, but only for the first page of the website. Is it possible to scrape multiple JavaScript-rendered pages, or do I need to handle them individually? Here is my script:
import time
from bs4 import BeautifulSoup as soup
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import json
# The path to where you have your chrome webdriver stored:
webdriver_path = '/Users/rawlins/Downloads/chromedriver'
# Add arguments telling Selenium to not actually open a window
chrome_options = Options()
chrome_options.add_argument('--headless')
#chrome_options.add_argument('--window-size=1920x1080')
# Fire up the headless browser
browser = webdriver.Chrome(executable_path=webdriver_path,
                           chrome_options=chrome_options)
# Load webpage
url = "https://cnx.org/search?q=subject:Arts"
browser.get(url)
# to ensure that the page has loaded completely.
time.sleep(3)
data = []
n = 2
for i in range(1, n+1):
    if (i == 1):
        # handle first page
        response = requests.get(url)
    response = requests.get(url + "&page=" + str(i))
    #response = requests.get(url + "&page=" + str(i),headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'})

    # Parse HTML, close browser
    page_soup = soup(browser.page_source, 'lxml')
    containers = page_soup.findAll("tr")
    browser.quit()

    for container in containers:
        item = {}
        item['type'] = "Course Material"
        if container.find('td', {'class' : 'title'}):
            item['title'] = container.find('td', {'class' : 'title'}).h4.text.strip()
        else:
            item['title'] = ""
        if container.find('td', {'class' : 'authors'}):
            item['author'] = container.find('td', {'class' : 'authors'}).text.strip()
        else:
            item['author'] = ""
        if container.find('td', {'class' : 'title'}):
            item['link'] = "https://cnx.org/" + container.find('td', {'class' : 'title'}).a["href"]
        else:
            item['link'] = ""
        if container.find('td', {'class' : 'title'}):
            item['description'] = container.find('td', {'class' : 'title'}).span.text
        else:
            item['description'] = ""
        item['subject'] = "Arts"
        item['source'] = "OpenStax CNX"
        item['base_url'] = "https://cnx.org/browse"
        item['license'] = "Attribution"
        data.append(item) # add the item to the list

    with open("js-webscrape.json", "w") as writeJSON:
        json.dump(data, writeJSON, ensure_ascii=False)
Thanks in advance.
Couple of issues here:
You're mixing requests.get() in with browser.get(). No need for the requests module at all here since you're getting the page via the headless browser.
No need to have a special case for the first page. https://cnx.org/search?q=subject:Arts&page=1 works fine.
time.sleep() should be between browser.get() and the parsing, to allow the page to fully load before feeding it to BeautifulSoup.
You should write data to the JSON file outside the for loop, once all pages have been scraped.
Quit the browser outside the for loop as well, not after a single iteration.
To avoid encoding errors, specify the encoding when writing to the JSON file: with open("js-webscrape.json", "w", encoding="utf-8")
Here's a working implementation that scrapes all 7 pages:
import time
from bs4 import BeautifulSoup as soup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import json
# The path to where you have your chrome webdriver stored:
webdriver_path = '/Users/Gebruiker/Downloads/chromedriver_win32/chromedriver'
# Add arguments telling Selenium to not actually open a window
chrome_options = Options()
chrome_options.add_argument('--headless')
# Fire up the headless browser
browser = webdriver.Chrome(executable_path = webdriver_path, options = chrome_options)
# Load webpage
url = "https://cnx.org/search?q=subject:Arts"
data = []
n = 7
for i in range(1, n+1):
    response = browser.get(url + "&page=" + str(i))
    time.sleep(5)

    # Parse HTML
    page_soup = soup(browser.page_source, 'lxml')
    containers = page_soup.findAll("tr")

    for container in containers:
        item = dict()
        item['type'] = "Course Material"
        if container.find('td', {'class' : 'title'}):
            item['title'] = container.find('td', {'class' : 'title'}).h4.text.strip()
        else:
            item['title'] = ""
        if container.find('td', {'class' : 'authors'}):
            item['author'] = container.find('td', {'class' : 'authors'}).text.strip()
        else:
            item['author'] = ""
        if container.find('td', {'class' : 'title'}):
            item['link'] = "https://cnx.org/" + container.find('td', {'class' : 'title'}).a["href"]
        else:
            item['link'] = ""
        if container.find('td', {'class' : 'title'}):
            item['description'] = container.find('td', {'class' : 'title'}).span.text
        else:
            item['description'] = ""
        item['subject'] = "Arts"
        item['source'] = "OpenStax CNX"
        item['base_url'] = "https://cnx.org/browse"
        item['license'] = "Attribution"
        data.append(item) # add the item to the list

# write data to file and quit browser when done
print(data)
with open("js-webscrape.json", "w", encoding="utf-8") as writeJSON:
    json.dump(data, writeJSON, ensure_ascii=False)
browser.quit()
I don't know what is going on. It was working perfectly with the other code that exports to a csv file, but somehow this one doesn't write the data: it creates the file, but the file is blank.
If anyone could give a tip it would be appreciated. Quite possibly it is just a simple fix, because I'm a newbie.
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import re
import csv
filename = "test.csv"
f = open(filename, "a")
headers = "location, country, type, level, deep, vision, water, access, life kind \n"
f.write(headers)
my_url = "http://www.example.com"
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()
page_soup = soup(page_html, "html.parser")
links = page_soup.select('sea > a[href]')
link = [tag.get('href') for tag in links]
for url in link:
    Client = uReq(url)
    pageHtml = Client.read()
    Client.close()
    pSoup = soup(pageHtml, "html.parser")
    linkeas = pSoup.findAll(href=re.compile(my_url))

    def linkas(href):
        return href and re.compile("html").search(href) and re.compile(my_url).search(href)

    linka = pSoup.findAll(href=linkas)
    if linka != []:
        linkia = [tag.get('href') for tag in linka]
        for curl in linkia:
            cClient = uReq(curl)
            pageHtml = cClient.read()
            cClient.close()
            Soup = soup(pageHtml, "html.parser")
            info = Soup.select('.val')
            if info != None:
                location = Soup.select('.last')
                if location[0].string != 'Page 2':
                    country = Soup.select('.cru > a:nth-of-type(3)')
                    countri = country[0].text.strip()
                    print(countri)
                    locat = location[0].text.strip()
                    print(locat)
                    tipo = info[0].text.strip()
                    print(tipo)
                    expe = info[1].text.strip()
                    print(expe)
                    depth = info[2].text.strip()
                    print(depth)
                    avg = info[3].text.strip()
                    print(avg)
                    cur = info[4].text.strip()
                    print(cur)
                    acc = info[5].text.strip()
                    print(acc)
                    life = info[6].text.strip()
                    print(life)
                    f.write(locat.replace(",", " |") + "," + countri.replace(",", " |") + "," + tipo.replace(",", " |") + "," + expe.replace(",", " |") + "," + depth.replace(",", " |") + "," + avg.replace(",", " |") + "," + cur.replace(",", " |") + "," + acc.replace(",", " |") + "," + life.replace(",", " |") + "\n")
                    continue
                else:
                    continue
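Two things worth checking with this pattern: f is never closed, so rows can sit in the write buffer instead of reaching the file, and if the selectors match nothing the loop body never runs at all. Below is a minimal sketch of the writing side using csv.writer and a context manager so the file is always flushed and closed; the helper name write_rows is just for illustration, and the row values are the ones printed in the loop above.

import csv

FIELDNAMES = ["location", "country", "type", "level", "deep",
              "vision", "water", "access", "life kind"]

def write_rows(rows, filename="test.csv"):
    # The context manager guarantees the buffer is flushed and the file is
    # closed even if the scraping loop raises an exception part-way through.
    with open(filename, "a", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(FIELDNAMES)
        for row in rows:
            writer.writerow(row)  # csv handles commas; no manual replace needed

# For example, one row per scraped page:
# write_rows([[locat, countri, tipo, expe, depth, avg, cur, acc, life]])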