I don't know what to do about the error "invalid syntax (<unknown>, line 20) pylint(syntax-error)"

I'm trying to build my own desktop assistant and ran into a problem with the first line. I've checked for extra spaces or lines, but everything looks OK to me. Could you please check whether anything is wrong?
I've added my script in progress below. Thank you all!
import speech_recognition as sr
import os
import sys
import re
import webbrowser
import smtplib
import requests
import subprocess
from pyowm import OWM
import youtube_dl
import vlc
import urllib
import urllib2
import json
from bs4 import BeautifulSoup as soup
from urllib2 import urlopen
import wikipedia
import random
from time import strftime

def sofiaResponse (audio);
    "speaks audio passed as argument"
    print(audio)
    for line in audio.splitlines():
        os.system("say" + audio)

def myCommand ():
    "listens for commands"
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print('Say something...')
        r.pause_threshold = 1
        r.adjust_for_ambient_noise(source, duration=1)
        audio = r.listen(source)
    try:
        command = r.recognize_google(audio).lower()
        print('You said: ' + command + '\n')
        # loop back to continue listening
    except sr.UnknownValueError:
        print('Error, help me error')
        command = myCommand();
    return command

def assistant (command):
    "if statements for executing commands"

You made a typo in the definition of sofiaResponse:
def sofiaResponse (audio);
should be
def sofiaResponse (audio):
A def statement must end with a colon, not a semicolon, which is why the parser reports a syntax error there.
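As a side note (my observation, not part of the original fix): once the colon is corrected, os.system("say" + audio) will still misbehave, because "say" is concatenated with no space (the shell sees something like sayhello) and the loop variable line is never used. A minimal corrected sketch of that function:

def sofiaResponse(audio):
    "speaks audio passed as argument"
    print(audio)
    for line in audio.splitlines():
        os.system("say " + line)  # space after "say"; speak each line in turn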

Related

Has pandas_datareader been updated?

I tried to run this code in pycharm:
import pandas as pd
import numpy as np
import datetime as dt
import pandas_datareader as web
start = dt.datetime.now()-dt.timedelta(days=365*3)
end = dt.datetime.now()
data=web.DataReader('AAPL', 'yahoo', start, end)
But it only gives errors, such as "line 8, in data=web.DataReader('AAPL', 'yahoo', start, end)".
What is the problem?
Thanks in advance.
I tried to look for a solution on the internet, but found nothing.
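One likely cause, offered as an assumption rather than a confirmed diagnosis for this exact traceback: Yahoo changed the endpoint that pandas_datareader's 'yahoo' source depends on, which broke web.DataReader(..., 'yahoo', ...) for many users. A minimal sketch of the common workaround using the yfinance package:

import datetime as dt
import yfinance as yf  # pip install yfinance

start = dt.datetime.now() - dt.timedelta(days=365 * 3)
end = dt.datetime.now()
# yfinance queries Yahoo Finance directly, bypassing the retired
# endpoint that pandas_datareader's 'yahoo' source relied on
data = yf.download('AAPL', start=start, end=end)
print(data.head())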

How to fix "FileNotFoundError" when using proper code and file extension CSV?

I am trying to open a file with the extension .csv in Python; however, it keeps saying the file is not found. I am copying the path from the sidebar, so I don't believe that's the problem.
I have tried inserting / and ./ before the path of the file, and putting r in front of the file name.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
bkgrnd = pd.read_csv('/Desktop/Sro/Natrium22.csv')
No matter what I've tried, it keeps saying FileNotFoundError
You can use the csv module if the file will always be a .csv:
import csv

# a raw string stops the backslashes being read as escape sequences
with open(r'C:\Users\user\Desktop\Sro\Natrium22.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in csv_reader:
        print(row)  # loop body assumed; the original snippet broke off here
        line_count += 1
Specifically on Windows, the pathname may need normalization; maybe that's the issue. Try the following, which should work:
import os
import pandas as pd
cwd = os.getcwd()
filePath = 'C:/Users/user/Desktop/Sro/Natrium22.csv'
data = pd.read_csv(os.path.normcase(os.path.join(cwd, filePath)))
print(data)
You can even try:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
bkgrnd = pd.read_csv(r'C:\Users\user\Desktop\Sro\Natrium22.csv')
print(bkgrnd)
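One more quick diagnostic worth adding (my sketch, not part of the answer above): check whether the file actually exists at the path before blaming read_csv. The path below assumes the same location as in the snippets above:

from pathlib import Path

# assumed location from the snippets above; adjust to your machine
csv_path = Path(r'C:\Users\user\Desktop\Sro\Natrium22.csv')
print(csv_path.exists())  # False means the path itself is wrong, not read_csv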

raise_FirstSetError in SpaCy topic modeling

I want to create an LDA topic model and am using spaCy to do so, following a tutorial. The error I receive when I try to use spaCy is one I cannot find on Google, so I'm hoping someone here knows what it's about.
I'm running this code on Anaconda:
import numpy as np
import pandas as pd
import re, nltk, spacy, gensim
# Sklearn
from sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from pprint import pprint
# Plotting tools
import pyLDAvis
import pyLDAvis.sklearn
import matplotlib.pyplot as plt

df = pd.DataFrame(data)

def sent_to_words(sentences):
    for sentence in sentences:
        yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))  # deacc=True removes punctuations

data_words = list(sent_to_words(data))
print(data_words[:1])

def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
    """https://spacy.io/api/annotation"""
    texts_out = []
    for sent in texts:
        doc = nlp(" ".join(sent))
        texts_out.append(" ".join([token.lemma_ if token.lemma_ not in ['-PRON-'] else '' for token in doc if token.pos_ in allowed_postags]))
    return texts_out

nlp = spacy.load('en', disable=['parser', 'ner'])

# Do lemmatization keeping only Noun, Adj, Verb, Adverb
data_lemmatized = lemmatization(data_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
print(data_lemmatized[:1])
And I receive the following error:
File "C:\Users\maart\AppData\Local\Continuum\anaconda3\lib\site-packages\_regex_core.py", line 1880, in get_firstset
raise _FirstSetError()
_FirstSetError
The error must occur somewhere after the lemmatization, because the other parts work fine.
Thanks a bunch!
I had this same issue and was able to resolve it by uninstalling regex (I had the wrong version installed) and then running python -m spacy download en again. This reinstalls the correct version of regex.
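If you want to confirm the mismatch before reinstalling, a quick check (my sketch; it assumes the regex package exposes its version attribute as below):

import regex

# spaCy's tokenizer pins a specific release of the third-party regex
# package; an incompatible one is what raises _FirstSetError here
print(regex.__version__)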

How to convert Fantasy Premier League Data from JSON to CSV?

I am new to Python, and as part of my thesis work I am trying to convert JSON to CSV. I am able to download the data as JSON, but when I write it back out using dictionaries it does not convert to CSV with every column.
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
import requests
from pprint import pprint
import csv
from time import sleep

s1='https://fantasy.premierleague.com/drf/element-summary/'
print s1
players = []
for player_link in range(1,450,1):
    link = s1+""+str(player_link)
    print link
    r = requests.get(link)
    print r
    player = r.json()
    players.append(player)
    sleep(1)

with open('C:\Users\dell\Downloads\players_new2.csv', 'w') as f:  # Just use 'w' mode in 3.x
    w = csv.DictWriter(f, player.keys())
    w.writeheader()
    for player in players:
        w.writerow(player)
I have uploaded the expected output (dec_15_expected.csv) and the program output (player_new_wrong_output.csv) here:
https://drive.google.com/drive/folders/0BwKYmRU_0K6tZUljd3Q0aG1LT0U?usp=sharing
It would be a great help if someone could tell me what I am doing wrong.
Converting JSON to CSV is simple with pandas. Try this:
import pandas as pd
df=pd.read_json("input.json")
df.to_csv('output.csv')
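If columns are still missing after that, it is probably because the FPL JSON is nested, so a nested object collapses into a single column when written naively. A sketch of flattening one nested list with pandas.json_normalize; the 'history' key is an assumption about this API's shape, so inspect data.keys() for the real one:

import requests
import pandas as pd

url = 'https://fantasy.premierleague.com/drf/element-summary/1'
data = requests.get(url).json()
# 'history' is an assumed key for one of the nested record lists;
# json_normalize spreads each record's fields into proper columns
df = pd.json_normalize(data['history'])
df.to_csv('output.csv', index=False)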

Scraping content from page encountering None type return

I am crawling company profile info from pages like http://www.sfma.org.sg/member/info/a-linkz-marketing-pte-ltd and I want the content from the company name down to the website, before the category content. I have written some code, but it returns a NoneType error.
Could anyone take a look at the code and see what went wrong? Many thanks.
import sys
import csv
import urllib
import requests
from bs4 import BeautifulSoup
import time
import datetime
from random import randint
import numpy as np
import pandas as pd

fi = open('Input_List.csv', 'r')
#readers = list(csv.reader(fi))
readers = csv.reader(fi)
#print(readers)
df = []
for reader in readers:
    #print(str(reader)[1:-1]+"\n")
    url = str(reader)[2:-2]
    request = requests.session()
    htmlpage = requests.get(url)
    #print("status code: "+ str(htmlpage.status_code))
    if htmlpage.status_code != 200:
        break  # something went wrong #
    soup = BeautifulSoup(htmlpage.text, 'lxml')
    for result_table in soup.findall("div", {"class": "w3-container"}):
        #content=result_table.find('p')
        #print(result_table)
        content = result_table.text
        if(content.find("Website") > -1):
            index = content.find("Website")
            content = content[:content.find("\n", index)]
            #print(content)
            df = np.append(df, content)
            break
    #print(content)
    df = np.append(df, str(content))
#print(df)
df1 = pd.DataFrame(df)
df1.to_csv("SFMA.csv", index=False, encoding='utf-8')
# #df.savetxt("SFMA.csv", index=False,encoding='utf-8')
# #df.save("SFMA.csv")
fi.close()
You made a typo: soup.findall("div", {"class": "w3-container"}) should be soup.find_all("div", {"class": "w3-container"}).
There is no findall method in BeautifulSoup; bs4 treats an unknown attribute as a tag search, so soup.findall looks for a <findall> tag and returns None, and calling that None is what fails.
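A tiny demonstration of that lookup behavior (my sketch, based on how bs4 resolves unknown attributes):

from bs4 import BeautifulSoup

soup = BeautifulSoup('<div class="w3-container">hi</div>', 'html.parser')
print(soup.findall)          # None: bs4 treats this as a search for a <findall> tag
print(soup.find_all('div'))  # [<div class="w3-container">hi</div>]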
To get the data from the company name to the website, you can do this:
for result_table in soup.find_all("div", {"class": "w3-container"}):
    content = result_table.text
    index = content.find("Website")
    if(index > -1):
        content = content[:content.find("\n", index)]
        print(content)
        break