Why is this error occurring when I use writerow?

Why am I getting this error when I try to use writerows?
db.writerows(loanList[i])
TypeError: writerows() argument must be iterable
import csv

header = ['Name', 'Results']
file = open("loanResults.csv", "w", newline="")

a = 10000
b = [0.032, 0.043, 0.037, 0.043, 0.044, 0.029, 0.028, 0.030]
c = [6, 7, 4, 3, 4, 6, 7, 9]

loanList = []

def monthlyRepayment(principalAmount, interest, year):
    for i in range(len(b)):
        finalAmount = principalAmount * ((1 + interest[i]) ** year[i])
        monthlyAmount = finalAmount / (year[i] * 12)
        loanList.append(round(monthlyAmount))
        print(round(monthlyAmount))

testCase1 = monthlyRepayment(a, b, c)

db = csv.writer(file)
db.writerow(header)
for i in range(len(loanList)):
    db.writerows(loanList[i])
file.close()
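The error comes from the final loop: csv.writer.writerow() writes one row per call, while writerows() expects an iterable of rows, and each loanList[i] here is a single number. A minimal sketch of both working variants, assuming loanList holds plain numbers as above:

import csv

loanList = [120, 340, 560]  # hypothetical monthly amounts

with open("loanResults.csv", "w", newline="") as f:
    db = csv.writer(f)
    db.writerow(["Name", "Results"])
    for amount in loanList:
        db.writerow([amount])  # writerow takes one row: an iterable of cells
    # or, one call instead of the loop:
    # db.writerows([amount] for amount in loanList)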

Related

wanting to write a txt with Tkinter entry box

I'm just trying to allow users to write in their name and email address, which is then written into a text file. There are no error messages that pop up; it just isn't writing into the file. Also, the message box isn't coming up with (+ aname + '\n' + fullemail + '\n'), it just comes up with the message. Cheers
import tkinter as tk  # shortening tkinter
import tkinter.messagebox as box
import csv
from tkinter import *

def store_customers():
    aname = name.get()
    aemail = email.get()
    aemailaddress = emailaddress.get()
    fullemail = aemail + aemailaddress
    print(fullemail)
    if (name == "" or email == ""):
        print('Error')
        messagebox.showerror('error', "their was some issur with your information")
        email.set('')
        name.set('')
    else:
        result = messagebox.askquestion('question', 'Your about to enter yor information \n' + aname + '\n' + fullemail + '\n')
        if (result == 'yes'):
            print('here')
            with open('customersdata.txt', 'a') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow([aname, fullemail])
            csvfile.close()
        else:
            name.set('')
            email.set('')

name = StringVar()
email = StringVar()
emailaddress = StringVar()

name = tk.Entry(frame4, text="", bg='#F0EAD6', font=('Arial', 24))
name.place(x=400, y=600)
email = tk.Entry(frame4, text="", bg='#F0EAD6', font=('Arial', 24))
email.place(x=400, y=660)

list1 = ['@yahoo.com', '@bing.com', '@jpc.vic.edu.au', '@gmail.com', '@hotmail.com']
emailaddres = OptionMenu(frame4, emailaddress, *list1)
emailaddres.config(height=2)
emailaddress.set('@***.***.edu.au')
emailaddres.place(x=685, y=660)

storebtn = tk.Button(frame4, text='complete', bg='#F0EAD6', font=('Arial', 24), command=store_customers)
storebtn.place(x=430, y=700)

tk.Label(frame4, text="Your name", bg='#F0EAD6', font=('Arial', 24)).place(x=260, y=600)
tk.Label(frame4, text="email address", bg='#F0EAD6', font=('Arial', 24)).place(x=240, y=660)

frame4.mainloop()
Here is working code with Tkinter:
import csv
import tkinter
import tkinter.messagebox

def save():
    nome_to_save = namevalue.get()
    email_to_save = emailvalue.get()
    if len(nome_to_save) == 0 or len(email_to_save) == 0:
        tkinter.messagebox.showinfo('Error', 'You need to enter name and e-mail!')
    else:
        with open('customersdata.csv', 'a', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([nome_to_save, email_to_save])
        tkinter.messagebox.showinfo('Success', f'Information of {nome_to_save} saved!')

window = tkinter.Tk()
window.title("Write CSV")

namevalue = tkinter.StringVar()
emailvalue = tkinter.StringVar()

tkinter.Label(window, text="Name").grid(row=0)
name = tkinter.Entry(window, textvariable=namevalue).grid(row=0, column=1)
tkinter.Label(window, text="E-mail").grid(row=1)
email = tkinter.Entry(window, textvariable=emailvalue).grid(row=1, column=1)
tkinter.Button(window, text="Save", command=save).grid(row=2)

tkinter.mainloop()
You probably receive some error, right?
You have imported tkinter.messagebox as box, so replace messagebox.showerror and messagebox.askquestion with box.showerror and box.askquestion.
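For example, a minimal sketch of the corrected calls under that alias (the message strings here are shortened placeholders):

import tkinter.messagebox as box

box.showerror('error', 'there was some issue with your information')
result = box.askquestion('question', 'You are about to enter your information')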
PS: I am new here, so I cannot comment. Sorry!
This code can help you.
import csv

def store_customers(name, email):
    if name == "" or email == "":
        print('Error, I need name and email')
    else:
        with open('customersdata.csv', 'a', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([name, email])

# store_customers('', '')
store_customers('Diego', 'diego@false.io')
store_customers('Ana Maria', 'ana.maria@myemail.io')
store_customers('Julia Neau', 'jneau@cool.io')
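If my reading of csv.writer is right, after the three calls above customersdata.csv would contain roughly:

Diego,diego@false.io
Ana Maria,ana.maria@myemail.io
Julia Neau,jneau@cool.io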

Load a page completely with requests in python (or other ways)

Hi,
I was wondering if I can load a page completely with Python, for example a hashtag page from Instagram. There is code I tried, but it wouldn't load completely. Here's my code:
import json
import re
import requests

x = input("Enter your hashtag: ")
response = requests.get('https://www.instagram.com/explore/tags/' + x + '/?__a=1')
if response.status_code == 404:
    print('page not found')
    input()
    exit()

data = response.text
x = re.findall(r'"shortcode":"[^"]{11}[^,]', data)
y = [i.split('"')[3] for i in x]

x = 0
z = len(y)
print(str(z) + ' Posts found')
while x < z:
    print('\r' + str(x) + ' posts done', end="")
    data = requests.get('https://www.instagram.com/p/' + y[x] + '/?__a=1')
    y[x] = data.text
    x = x + 1
print()
print('post link finished')

Usernames = []
Posts = []
Followers = []
Following = []

x = 0
while x < z:
    print('\r' + str(x) + ' Usernames done', end="")
    data = json.loads(y[x])
    Usernames.append(data['graphql']['shortcode_media']['owner']['username'])
    x = x + 1
print()
print('Usernames finished')
print(len(Usernames))
I want to get more usernames, like 100k or more. If you can help me with other libraries, that's fine too; the exact library isn't important.
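One way to make this less fragile, sketched under the assumption that the ?__a=1 endpoint still returns the graphql hashtag JSON it returned at the time of this question, is to parse the response as JSON instead of regexing the raw text:

import requests

tag = "nature"  # hypothetical example hashtag
response = requests.get('https://www.instagram.com/explore/tags/' + tag + '/?__a=1')
data = response.json()

# Assumed layout of the hashtag payload for that era:
edges = data['graphql']['hashtag']['edge_hashtag_to_media']['edges']
shortcodes = [edge['node']['shortcode'] for edge in edges]
print(len(shortcodes), 'posts found')

Note that this only covers the first page of results; reaching 100k usernames would still require paginating with the end_cursor in the payload's page_info, which this sketch does not do.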

How to read data from csv file in tensorflow?

I want to read data from a csv file in TensorFlow. I've been trying out different ways of reading a CSV file with 2000 lines, each line with 93 features, and I hope to get one-hot values.
My dataset is like this: the first column is data of 93 features, and the second column is labels of 16 one-hot values.
This is my code:
import tensorflow as tf

# data_input = pd.read_csv('ans_string.csv')
# data_train = pd.read_csv('ans_result.csv')

x = tf.placeholder(tf.float32, [None, 93])
W = tf.Variable(tf.zeros([93, 16]))
b = tf.Variable(tf.zeros([16]))

sess = tf.InteractiveSession()

filename_queue = tf.train.string_input_producer(["dataset.csv"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# _, csv_row = reader.read(filename_queue)
# data = tf.decode_csv(csv_row, record_defaults=record_defaults)

record_defaults_key = [[1]] * 93    # 93 integer defaults
record_defaults_value = [[1]] * 16  # 16 integer defaults
list_result_key = tf.decode_csv(key, record_defaults=record_defaults_key)
list_result_value = tf.decode_csv(value, record_defaults=record_defaults_value)
features = tf.stack(list_result_key)
labels = tf.stack(list_result_value)

y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 16])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

with tf.Session() as sess:
    # something happened
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    tf.global_variables_initializer().run()
    for _ in range(1000):
        example, label = sess.run([features, labels])
        print(sess.run(example, label))
        sess.run(train_step, feed_dict={x: example, y_: label})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy.eval({x: example, y_: label})))
    coord.request_stop()
    coord.join(threads)
I want to train my model, but I got an error. How can I fix it?
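One thing that stands out: reader.read() returns a (key, value) pair where key is just the line's identifier (roughly "filename:line_number"), so decoding key with 93 record defaults cannot work; only value holds the CSV line. A minimal sketch of the decode step, assuming each CSV line really carries 93 feature columns followed by 16 label columns:

import tensorflow as tf

filename_queue = tf.train.string_input_producer(["dataset.csv"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)

# Decode the line contents (value), not the key:
record_defaults = [[1.0]] * 93 + [[1]] * 16  # assumed layout: 93 floats, then 16 ints
columns = tf.decode_csv(value, record_defaults=record_defaults)
features = tf.stack(columns[:93])  # float32 feature vector
labels = tf.stack(columns[93:])    # int32 one-hot label vector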

read data from luasql with utf8 format

I need to read the data in my query in UTF-8 format. I tried to change the collation of my SQL database; when I read data based on the English alphabet everything is good, but I have trouble with Arabic and other languages.
When I print a string that came from a MySQL query, it shows me something like ???????.
How can I solve this problem so they display correctly?
After retrieving UTF-8 strings from the database, you should manually convert them to CP1256.
You can use the function str:fromutf8() defined below.
local char, byte, pairs, floor = string.char, string.byte, pairs, math.floor
local table_insert, table_concat = table.insert, table.concat
local unpack = table.unpack or unpack

local function unicode_to_utf8(code)
  -- converts numeric UTF code (U+code) to UTF-8 string
  local t, h = {}, 128
  while code >= h do
    t[#t+1] = 128 + code % 64
    code = floor(code / 64)
    h = h > 32 and 32 or h / 2
  end
  t[#t+1] = 256 - 2 * h + code
  return char(unpack(t)):reverse()
end

local function utf8_to_unicode(utf8str, pos)
  -- pos = starting byte position inside input string (default 1)
  pos = pos or 1
  local code, size = utf8str:byte(pos), 1
  if code >= 0xC0 and code < 0xFE then
    local mask = 64
    code = code - 128
    repeat
      local next_byte = utf8str:byte(pos + size) or 0
      if next_byte >= 0x80 and next_byte < 0xC0 then
        code, size = (code - mask - 2) * 64 + next_byte, size + 1
      else
        code, size = utf8str:byte(pos), 1
      end
      mask = mask * 32
    until code < mask
  end
  -- returns code, number of bytes in this utf8 char
  return code, size
end
local map_1256_to_unicode = {
  [0x80] = 0x20AC, [0x81] = 0x067E, [0x82] = 0x201A, [0x83] = 0x0192,
  [0x84] = 0x201E, [0x85] = 0x2026, [0x86] = 0x2020, [0x87] = 0x2021,
  [0x88] = 0x02C6, [0x89] = 0x2030, [0x8A] = 0x0679, [0x8B] = 0x2039,
  [0x8C] = 0x0152, [0x8D] = 0x0686, [0x8E] = 0x0698, [0x8F] = 0x0688,
  [0x90] = 0x06AF, [0x91] = 0x2018, [0x92] = 0x2019, [0x93] = 0x201C,
  [0x94] = 0x201D, [0x95] = 0x2022, [0x96] = 0x2013, [0x97] = 0x2014,
  [0x98] = 0x06A9, [0x99] = 0x2122, [0x9A] = 0x0691, [0x9B] = 0x203A,
  [0x9C] = 0x0153, [0x9D] = 0x200C, [0x9E] = 0x200D, [0x9F] = 0x06BA,
  [0xA0] = 0x00A0, [0xA1] = 0x060C, [0xA2] = 0x00A2, [0xA3] = 0x00A3,
  [0xA4] = 0x00A4, [0xA5] = 0x00A5, [0xA6] = 0x00A6, [0xA7] = 0x00A7,
  [0xA8] = 0x00A8, [0xA9] = 0x00A9, [0xAA] = 0x06BE, [0xAB] = 0x00AB,
  [0xAC] = 0x00AC, [0xAD] = 0x00AD, [0xAE] = 0x00AE, [0xAF] = 0x00AF,
  [0xB0] = 0x00B0, [0xB1] = 0x00B1, [0xB2] = 0x00B2, [0xB3] = 0x00B3,
  [0xB4] = 0x00B4, [0xB5] = 0x00B5, [0xB6] = 0x00B6, [0xB7] = 0x00B7,
  [0xB8] = 0x00B8, [0xB9] = 0x00B9, [0xBA] = 0x061B, [0xBB] = 0x00BB,
  [0xBC] = 0x00BC, [0xBD] = 0x00BD, [0xBE] = 0x00BE, [0xBF] = 0x061F,
  [0xC0] = 0x06C1, [0xC1] = 0x0621, [0xC2] = 0x0622, [0xC3] = 0x0623,
  [0xC4] = 0x0624, [0xC5] = 0x0625, [0xC6] = 0x0626, [0xC7] = 0x0627,
  [0xC8] = 0x0628, [0xC9] = 0x0629, [0xCA] = 0x062A, [0xCB] = 0x062B,
  [0xCC] = 0x062C, [0xCD] = 0x062D, [0xCE] = 0x062E, [0xCF] = 0x062F,
  [0xD0] = 0x0630, [0xD1] = 0x0631, [0xD2] = 0x0632, [0xD3] = 0x0633,
  [0xD4] = 0x0634, [0xD5] = 0x0635, [0xD6] = 0x0636, [0xD7] = 0x00D7,
  [0xD8] = 0x0637, [0xD9] = 0x0638, [0xDA] = 0x0639, [0xDB] = 0x063A,
  [0xDC] = 0x0640, [0xDD] = 0x0641, [0xDE] = 0x0642, [0xDF] = 0x0643,
  [0xE0] = 0x00E0, [0xE1] = 0x0644, [0xE2] = 0x00E2, [0xE3] = 0x0645,
  [0xE4] = 0x0646, [0xE5] = 0x0647, [0xE6] = 0x0648, [0xE7] = 0x00E7,
  [0xE8] = 0x00E8, [0xE9] = 0x00E9, [0xEA] = 0x00EA, [0xEB] = 0x00EB,
  [0xEC] = 0x0649, [0xED] = 0x064A, [0xEE] = 0x00EE, [0xEF] = 0x00EF,
  [0xF0] = 0x064B, [0xF1] = 0x064C, [0xF2] = 0x064D, [0xF3] = 0x064E,
  [0xF4] = 0x00F4, [0xF5] = 0x064F, [0xF6] = 0x0650, [0xF7] = 0x00F7,
  [0xF8] = 0x0651, [0xF9] = 0x00F9, [0xFA] = 0x0652, [0xFB] = 0x00FB,
  [0xFC] = 0x00FC, [0xFD] = 0x200E, [0xFE] = 0x200F, [0xFF] = 0x06D2,
}
local map_unicode_to_1256 = {}
for code1256, code in pairs(map_1256_to_unicode) do
  map_unicode_to_1256[code] = code1256
end

function string.fromutf8(utf8str)
  local pos, result_1256 = 1, {}
  while pos <= #utf8str do
    local code, size = utf8_to_unicode(utf8str, pos)
    pos = pos + size
    code = code < 128 and code or map_unicode_to_1256[code] or ('?'):byte()
    table_insert(result_1256, char(code))
  end
  return table_concat(result_1256)
end

function string.toutf8(str1256)
  local result_utf8 = {}
  for pos = 1, #str1256 do
    local code = str1256:byte(pos)
    table_insert(result_utf8, unicode_to_utf8(map_1256_to_unicode[code] or code))
  end
  return table_concat(result_utf8)
end
Usage is:
str:fromutf8() -- to convert from UTF-8 to cp1256
str:toutf8()   -- to convert from cp1256 to UTF-8
Example:
-- This is a cp1256 string
local str1256 = "1\128" -- "one euro" in cp1256
-- Convert it to UTF-8
local str_utf8 = str1256:toutf8() -- "1\226\130\172", one euro in UTF-8
-- Convert it back from UTF-8 to cp1256
local str1256_2 = str_utf8:fromutf8()

Scrapy returns no output - just a [

I'm trying to run the spider found in this crawler, and for simplicity's sake I'm using this start_url because it is just a list of 320 movies. (So the crawler won't run for 5 hours as given on the github page.)
I crawl using scrapy crawl imdb -o output.json, but the output.json file contains nothing. It has just a [ in it.
import scrapy
from texteval.items import ImdbMovie, ImdbReview
import urlparse
import math
import re

class ImdbSpider(scrapy.Spider):
    name = "imdb"
    allowed_domains = ["imdb.com"]
    start_urls = [
        # "http://www.imdb.com/chart/top",
        # "http://www.imdb.com/chart/bottom"
        "http://www.imdb.com/search/title?countries=csxx&sort=moviemeter,asc"
    ]
    DOWNLOADER_MIDDLEWARES = {
        'scrapy.contrib.downloadermiddleware.robotstxt.ROBOTSTXT_OBEY': True,
    }
    base_url = "http://www.imdb.com"

    def parse(self, response):
        movies = response.xpath("//*[@id='main']/table/tr/td[3]/a/@href")
        for i in xrange(len(movies)):
            l = self.base_url + movies[i].extract()
            print l
            request = scrapy.Request(l, callback=self.parse_movie)
            yield request
        next = response.xpath("//*[@id='right']/span/a")[-1]
        next_url = self.base_url + next.xpath(".//@href")[0].extract()
        next_text = next.xpath(".//text()").extract()[0][:4]
        if next_text == "Next":
            request = scrapy.Request(next_url, callback=self.parse)
            yield request
        '''
        for sel in response.xpath("//table[@class='chart']/tbody/tr"):
            url = urlparse.urljoin(response.url, sel.xpath("td[2]/a/@href").extract()[0].strip())
            request = scrapy.Request(url, callback=self.parse_movie)
            yield request
        '''

    def parse_movie(self, response):
        movie = ImdbMovie()
        i1 = response.url.find('/tt') + 1
        i2 = response.url.find('?')
        i2 = i2 - 1 if i2 > -1 else i2
        movie['id'] = response.url[i1:i2]
        movie['url'] = "http://www.imdb.com/title/" + movie['id']
        r_tmp = response.xpath("//div[@class='titlePageSprite star-box-giga-star']/text()")
        if r_tmp is None or r_tmp == "" or len(r_tmp) < 1:
            return
        movie['rating'] = int(float(r_tmp.extract()[0].strip()) * 10)
        movie['title'] = response.xpath("//span[@itemprop='name']/text()").extract()[0]
        movie['reviews_url'] = movie['url'] + "/reviews"
        # Number of reviews associated with this movie
        n = response.xpath("//*[@id='titleUserReviewsTeaser']/div/div[3]/a[2]/text()")
        if n is None or n == "" or len(n) < 1:
            return
        n = n[0].extract().replace("See all ", "").replace(" user reviews", "")\
            .replace(" user review", "").replace(",", "").replace(".", "").replace("See ", "")
        if n == "one":
            n = 1
        else:
            n = int(n)
        movie['number_of_reviews'] = n
        r = int(math.ceil(n / 10))
        for x in xrange(1, r):
            start = x * 10 - 10
            url = movie['reviews_url'] + "?start=" + str(start)
            request = scrapy.Request(url, callback=self.parse_review)
            request.meta['movieObj'] = movie
            yield request

    def parse_review(self, response):
        ranks = response.xpath("//*[@id='tn15content']/div")[0::2]
        texts = response.xpath("//*[@id='tn15content']/p")
        del texts[-1]
        if len(ranks) != len(texts):
            return
        for i in xrange(0, len(ranks) - 1):
            review = ImdbReview()
            review['movieObj'] = response.meta['movieObj']
            review['text'] = texts[i].xpath("text()").extract()
            rating = ranks[i].xpath(".//img[2]/@src").re("-?\\d+")
            if rating is None or rating == "" or len(rating) < 1:
                return
            review['rating'] = int(rating[0])
            yield review
Can someone tell me where I am going wrong?
In my opinion, this web site probably loads the list of movies using JS. First, I suggest you check the output of movies = response.xpath("//*[@id='main']/table/tr/td[3]/a/@href"). If you want to get JS content, you can use WebKit in Scrapy as a downloader middleware.
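A quick way to run that check, sketched with the Scrapy shell (same URL and XPath as in the spider above):

scrapy shell "http://www.imdb.com/search/title?countries=csxx&sort=moviemeter,asc"
>>> response.xpath("//*[@id='main']/table/tr/td[3]/a/@href").extract()
# An empty list here means the selector doesn't match the server-rendered
# markup (or the table is injected by JS and never reaches Scrapy).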