I have a spreadsheet of members designed as below:
My aim is to upload some columns and exclude others. In this case, I wish to upload only the name, age and email and exclude the others. I have been able to achieve this using the slice method as shown below:
def load_imported_members
  spreadsheet = open_spreadsheet
  spreadsheet.default_sheet = 'Worksheet'
  header = spreadsheet.row(1)
  (2..spreadsheet.last_row).map do |i|
    row = Hash[[header, spreadsheet.row(i)].transpose]
    member = Member.find_by_id(row["id"]) || Member.new
    member.attributes = row.to_hash.slice("id", "name", "age", "email")
    member
  end
end
The problem is that last_row considers all the rows up to the last one (13), and since there are validations on the form, the empty rows (which shouldn't be considered) produce missing-data errors. Is there a way I can upload only specific columns as I have done, yet limit the import to only the rows that have data?
You might want to chain the map call off a reject filter. You may just need to change the map line to this (assuming the blank rows all look like those above):
(2..spreadsheet.last_row).reject { |i| spreadsheet.row(i)[0].nil? }.map do |i|
This assumes that blank cells come back as nil and that blank rows always have all four desired fields empty, as shown in the image. The reject call tests whether spreadsheet.row(i)[0], the id column, is nil; if it is, that row is dropped from the list handed to map.
Thanks for this question. I have learned some things from it. Building on your answer, here is my approach (note: it uses the 'roo' gem):
def load_imported_members(member)
  spreadsheet = open_spreadsheet(member)
  spreadsheet.each do |records|
    # transpose header and row for xlsx records; CSV rows already arrive as key/value pairs
    record = @spreadsheet ? Hash[[@header, @spreadsheet.row(records)].transpose] : Hash[records]
    attributes = { id: record['id'], name: record['name'], email: record['email'], age: record['age'] }
    member_object = Member.new(attributes)
    if member_object.valid?
      existing_member = Member.find_by_id(attributes[:id])
      if existing_member
        existing_member.update(attributes)
      else
        member_object.save
      end
    end
  end
end
You can parse your uploaded file using the Roo gem.
def self.open_spreadsheet(member)
  case File.extname(member.file.original_filename)
  when ".csv" then
    Roo::CSV.new(member.file.expiring_url, csv_options: { headers: true, skip_blanks: true, header_converters: ->(header) { header.strip }, converters: ->(data) { data ? data.strip : nil } })
  when ".xlsx", ".xls" then
    @spreadsheet = Roo::Spreadsheet.open(member.file.expiring_url)
    @header = @spreadsheet.row(1)
    (2..@spreadsheet.last_row)
  end
end
Here I have used the S3 expiring URL (expiring_url). Hope this is helpful. I have not tested it, so apologies for any small errors. If you have validations on name, email, and age, this should work for you.
I'm trying to make a command that stores the name, description and image of a character, and another command to retrieve that data in an embed, but I'm having trouble working with JSON files.
This is my code to add them:
@client.command()
async def addskillset(ctx):
    await ctx.send("Let's add this skillset!")
    questions = ["What is the monster name?", "What is the monster description?", "What is the monster image link?"]
    answers = []
    # code checking the questions results
    embedkra = nextcord.Embed(title=f"{answers[0]}", description=f"{answers[1]}", color=ctx.author.color)
    embedkra.set_image(url=f"{answers[2]}")
    mess = await ctx.reply(embed=embedkra, mention_author=False)
    await mess.add_reaction('✅')
    await mess.add_reaction('❌')

    def check(reaction, user):
        # compare against both emojis explicitly; `== "✅" or "❌"` would always be truthy
        return user == ctx.author and str(reaction.emoji) in ("✅", "❌")
    try:
        reaction, user = await client.wait_for('reaction_add', timeout=1000.0, check=check)
    except asyncio.TimeoutError:
        pass  # giving a message that the time is over
    else:
        if reaction.emoji == "✅":
            monsters = await get_skillsets_data()  # this function is defined at the end
            monster_name = answers[0]
            if str(monster_name) in monsters:
                await ctx.reply("the monster is already added")
            else:
                monsters[str(monster_name)] = {}
                monsters[str(monster_name)]["monster_name"] = answers[0]
                monsters[str(monster_name)]["monster_description"] = answers[1]
                monsters[str(monster_name)]["monster_image"] = answers[2]
                with open('skillsets.json', 'w') as f:
                    json.dump(monsters, f)
                await mess.delete()
                await ctx.reply(f"{answers[0]} successfully added to the list")
Code to get the embed with the requested info:
@client.command()
async def skilltest(ctx, *, monster_name):
    data = open('skillsets.json').read()
    data = json.loads(data)
    if str(monster_name) in data:
        name = data["monster_name"]
        description = data["monster_description"]
        link = data["monster_image"]
        embedkra = nextcord.Embed(title=f"{name}", description=f"{description}", color=ctx.author.color)
        embedkra.set_image(url=f"{link}")
        await ctx.reply(embed=embedkra, mention_author=False)
    else:
        # otherwise, it is still None meaning we didn't find it
        await ctx.reply("monster not found", mention_author=False)
And my JSON should look like this:
{"katufo": {"monster_name": "Katufo","Monster_description":"Katufo is the best","Monster_image":"#image_link"},
"armor claw":{"monster_name": "Armor Claw","Monster_description":"Armor claw is the best","Monster_image":#image_link}}
The get_skillsets_data used in first command:
async def get_skillsets_data():
    with open('skillsets.json', 'r') as f:
        monsters = json.load(f)
    return monsters
Well, when you are trying to retrieve data from your JSON file, try using name = data["katufo"]["monster_name"]; this will retrieve only the monster_name of the key katufo. If you want to retrieve data for armor claw, the code must go like this: name = data["armor claw"]["monster_name"]. So try this code:
@client.command()
async def skilltest(ctx, *, monster):
    data = open('skillsets.json').read()
    data = json.loads(data)
    if str(monster) in data:
        name = data[str(monster)]["monster_name"]
        description = data[str(monster)]["Monster_description"]
        link = data[str(monster)]["Monster_image"]
        embedkra = nextcord.Embed(title=f"{name}", description=f"{description}", color=ctx.author.color)
        embedkra.set_image(url=f"{link}")
        await ctx.reply(embed=embedkra, mention_author=False)
    else:
        # otherwise, it is still None meaning we didn't find it
        await ctx.reply("monster not found", mention_author=False)
Hope this works for you :)
If your JSON looks like what you showed above,
{
"katufo":{
"monster_name":"Katufo",
"Monster_description":"Katufo is the best",
"Monster_image":"#image_link"
},
"armor claw":{
"monster_name":"Armor Claw",
"Monster_description":"Armor claw is the best",
"Monster_image":"#image_link"
}
}
then there is no data["monster_name"]; the two objects inside of your JSON are named katufo and armor claw. To get one of them you can simply write data["katufo"]["monster_name"].
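For instance, a minimal sketch of the nested lookup, assuming the JSON above is saved as skillsets.json:
import json

with open('skillsets.json') as f:
    data = json.load(f)

# The outer key is the monster's name; the inner dict holds its fields.
print(data["katufo"]["monster_name"])      # Katufo
print(data["armor claw"]["monster_name"])  # Armor Claw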
Your problem stems from looking up the monster name like this:
if str(monster_name) in data:
name = data["monster_name"]
description = data["monster_description"]
link = data["monster_image"]
What you could do instead is loop through data, since it contains several monsters, and then do that check on each object:
for monster in data.values():
    if str(monster_name) in monster.values():
        name = monster["monster_name"]
        description = monster["Monster_description"]
        link = monster["Monster_image"]
One thing to think about: the way the variables are named is not something I personally recommend. Don't be afraid of longer, descriptive names so things make more sense for you in the code. Also, in the JSON you provided, certain attributes start with a capital letter, something you should think about.
Edit:
Dicts in Python are the equivalent of objects in JavaScript and are initialized using the same syntax, which we can see below:
monster_data = {}
But since you want a specific structure for these monsters, we can go further and create a function called add_monster_object():
def add_monster_object(original_dict, monster_name):
    # Create a new, empty monster entry keyed by its name.
    original_dict[monster_name] = {
        "monster_name": "",
        "monster_description": "",
        "monster_image": ""
    }
    return original_dict
Now every time you run this function with a given name, the dict will contain an object with that name. For example, if the user enters armor sword as the monster_name attribute, we can call the function above as add_monster_object(original_dict, monster_name).
This will, if we take your initial dict as an example, return this:
{
"katufo":{
"monster_name":"Katufo",
"Monster_description":"Katufo is the best",
"Monster_image":"#image_link"
},
"armor claw":{
"monster_name":"Armor Claw",
"Monster_description":"Armor claw is the best",
"Monster_image":"#image_link"
},
"armor sword":{
"monster_name":"",
"monster_description":"",
"monster_image":""
}
}
Then you can populate them as you want, or update the function to take more parameters. The important part here is that you take a minute and figure out what you want to keep saved. Then make sure that you can read and write from a file, and you should have a somewhat simple structure going. Warning: this isn't a cut-and-dried method; you will also have to think about special cases, such as adding an object that already exists, and so forth.
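As a rough sketch of that read-and-write plumbing (assuming skillsets.json lives next to the bot and may not exist yet; the load_monsters/save_monsters names are only illustrative):
import json

def load_monsters(path='skillsets.json'):
    # Start from an empty dict if the file hasn't been created yet.
    try:
        with open(path, 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        return {}

def save_monsters(monsters, path='skillsets.json'):
    with open(path, 'w') as f:
        json.dump(monsters, f, indent=2)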
If you decide to go with Replit you could use their database to create similar functionality but you wouldn't have to worry about reading and writing to a file.
As it is right now, I still think you should proceed with your bot and add some of the changes I mentioned before the next actual problem arrives, as there are many things that aren't quite right. I also suggest you break everything into manageable parts: 1 would be to read from a file, 2 would be to write, 3 to write a dict to a file, 4 to update a dict, and so forth. Good luck!
I'm new to Python and am using Python 3 to display the data from my weather station. The problem I have is that it used to work perfectly until I got a replacement station.
I found the problem
In the weather data sent there are 3 fields (not sure of the correct term), which are:
lightning_strike_last_distance
lightning_strike_last_distance_msg
lightning_strike_last_epoch
In my new station these fields are completely missing, as there has been no lightning since I got it. As a result the station display just doesn't parse the weather data, because those fields are not there. How can I get the program to check whether those fields/elements are present, parse them as usual if they are, and skip them and move on to the next section if they are not?
This is the relevant section of code:
lightning_strike_last_distance = forecast_json["current_conditions"]["lightning_strike_last_distance"]
lightning1 = lightning_strike_last_distance*0.621371 # convert km to miles
data.lightning_strike_last_distance = "{0:.2f} miles".format(lightning1)
lightning_strike_last_epoch = forecast_json["current_conditions"]["lightning_strike_last_epoch"]
data.lightning_strike_last_epoch = time.strftime("%d-%m-%Y %H:%M:%S", time.localtime(lightning_strike_last_epoch))
How can I fix it so the program skips those 3 elements/sections if they are missing?
Try the following pattern:
lightning_strike_last_distance = forecast_json["current_conditions"]["lightning_strike_last_distance"] if "lightning_strike_last_distance" in forecast_json["current_conditions"] else None
It will set lightning_strike_last_distance to the value if it is present, and to None if it is not. Repeat that pattern for all the other assignments.
To test it quickly, try:
data = {"a":{"b":1,},}
valueB = data["a"]["b"] if "b" in data["a"] else None
valueC = data["a"]["c"] if "c" in data["a"] else None
print (valueB)
print (valueC)
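Alternatively, dict.get expresses the same fallback more compactly. A minimal sketch, reusing the forecast_json and data objects from the question:
current = forecast_json["current_conditions"]

# .get() returns None (or a supplied default) instead of raising KeyError
lightning_strike_last_distance = current.get("lightning_strike_last_distance")
lightning_strike_last_epoch = current.get("lightning_strike_last_epoch")

if lightning_strike_last_distance is not None:
    data.lightning_strike_last_distance = "{0:.2f} miles".format(
        lightning_strike_last_distance * 0.621371)  # km to miles
if lightning_strike_last_epoch is not None:
    data.lightning_strike_last_epoch = time.strftime(
        "%d-%m-%Y %H:%M:%S", time.localtime(lightning_strike_last_epoch))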
I'm trying to update a file if it exists in a particular folder and has a specific name. In this instance the object in question is in a Team Drive. I followed the documentation to compose the q parameter for the list call, and also tried switching back to v2... The query seems to be composed exactly right, yet even though I can see multiple objects in the target folder, the list call fails to find them. I've tried name = '' and name contains ''. There seems to be enough input validation in place by the Google team, because when I get creative the API bombs. Any pointers?
def import_or_replace_csv_to_td_folder(self, folder_id, local_fn, remote_fn, mime_type):
    DRIVE = build('drive', 'v3', http=creds.authorize(Http()))
    query = "'{0}' in parents and name = '{1}'".format(folder_id, remote_fn)
    print("Searching for previous versions of this file : {0}".format(query))
    check_if_already_exists = DRIVE.files().list(q=query, fields="files(id, name)").execute()
    name_and_location_conflict = check_if_already_exists.get('files', [])
    if not name_and_location_conflict:
        body = {'name': remote_fn, 'mimeType': mime_type, 'parents': [folder_id]}
        out = DRIVE.files().create(body=body, media_body=local_fn, supportsTeamDrives=True, fields='id').execute().get('id')
        return out
    else:
        if len(name_and_location_conflict) == 1:
            file_id = name_and_location_conflict[0]['id']
            DRIVE.files().update(fileId=file_id, supportsTeamDrives=True, media_body=local_fn).execute()
            return file_id
        else:
            raise MultipleConflictsError("There are multiple documents matching parent folder and file name. Unclear which requires a version update")
When I tried to replace the 'name' parameter with 'title' (which used to work in v2, based on some answers I reviewed), the API barfed:
googleapiclient.errors.HttpError: <HttpError 400 when requesting https://www.googleapis.com/drive/v3/files?q=%27xxxxxxxxxxxxxxxx%27+in+parents+and+title+%3D+%27Somefile_2018-09-27.csv%27&fields=files%28id%2C+name%29&alt=json returned "Invalid Value">
Thanks @tehhowch, indeed extra measures are necessary when the target is in a Team Drive: the includeTeamDriveItems option needs to be set, otherwise Team Drive locations are not included by default:
check_if_already_exists = DRIVE.files().list(
q=query,
fields="files(id, name)",
supportsTeamDrives=True,
includeTeamDriveItems=True
).execute()
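Note that Google has since renamed Team Drives to shared drives, so in current Drive v3 client versions the equivalent flags are supportsAllDrives and includeItemsFromAllDrives; the same call would then look like:
check_if_already_exists = DRIVE.files().list(
    q=query,
    fields="files(id, name)",
    supportsAllDrives=True,          # renamed from supportsTeamDrives
    includeItemsFromAllDrives=True   # renamed from includeTeamDriveItems
).execute()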
Earlier I asked about gathering information from an API link, and I managed to get most of the details out using the answer I got.
Now my problem is with another API call to get more information.
This time the file will contain this information:
{
"username":"UserName",
"confirmed_rewards":"0",
"round_estimate":"0.00000000",
"total_hashrate":"0.000",
"payout_history":"0",
"round_shares":"0",
"workers":{
"UserName.1":{
"alive":"0",
"hashrate":"0.000"
},
"UserName.2":{
"alive":"0",
"hashrate":"0.000"
},
"UserName.3":{
"alive":"1",
"hashrate":"1517.540",
"last_share_timestamp":1369598007
},
"UserName.4":{
"alive":"0",
"hashrate":"0.000"
}
}
}
And I want to gather each of the workers and print them out. This "workers" object can contain multiple entries, but the keys always have the form "UserName.x", where the username comes from the "username" parameter each time. The numbers will always vary from 0 and up.
I want to gather the information the same way, by accessing the document, decoding it, and printing out all the workers, however many there are.
Using the script provided in my last question (see the link at the start), I was thinking it would be something like
local t = json.decode( txt )
print("Workers: ".. t["workers.UserName.1"])
But this was not the way.
Due to the username changing all the time, I was also thinking of something like
print("Workers: ".. t["workers" .. "." .. "username" .. "." .. "1"])
From here I have no clue how to gather the information when the names and numbers vary.
Thanks in advance
Here is the perfect solution:
local json = require "json"
local t = json.decode( jsonFile( "data.json" ) )
local workers = t.workers

for name, user in pairs(workers) do
  print("--------------------")
  print(name)
  for tag, value in pairs(user) do
    print(tag, value)
  end
end
Here is some more info:
http://www.coronalabs.com/blog/2011/08/03/tutorial-exploring-json-usage-in-corona/
http://www.coronalabs.com/blog/2011/06/21/understanding-lua-tables-in-corona-sdk/
http://lua-users.org/wiki/TablesTutorial
I'm trying to process the following with a JSON Input step:
{"address":[
{"AddressId":"1_1","Street":"A Street"},
{"AddressId":"1_101","Street":"Another Street"},
{"AddressId":"1_102","Street":"One more street", "Locality":"Buenos Aires"},
{"AddressId":"1_102","Locality":"New York"}
]}
However this seems not to be possible:
Json Input.0 - ERROR (version 4.2.1-stable, build 15952 from 2011-10-25 15.27.10 by buildguy) :
The data structure is not the same inside the resource!
We found 1 values for json path [$..Locality], which is different that the number retourned for path [$..Street] (3509 values).
We MUST have the same number of values for all paths.
The step provides an Ignore Missing Path flag, but it only works if all the rows miss the same path. In that case the step acts as expected and fills the missing values with null.
This limits the step's ability to read uneven data, which was really one of my priorities.
My step Fields are defined as follows:
Am I missing something? Is this the correct behavior?
What I have done is use a JSON Input step with $.address[*] to read the full map of each element into a jsonRow field, e.g.:
{"address":[
{"AddressId":"1_1","Street":"A Street"},
{"AddressId":"1_101","Street":"Another Street"},
{"AddressId":"1_102","Street":"One more street", "Locality":"Buenos Aires"},
{"AddressId":"1_102","Locality":"New York"}
]}
This results in 4 jsonRows, one for each element, e.g. jsonRow = {"AddressId":"1_101","Street":"Another Street"}. Then, using a JavaScript step, I map my values using this:
var AddressId = getFromMap('AddressId', jsonRow);
var Street = getFromMap('Street', jsonRow);
var Locality = getFromMap('Locality', jsonRow);
In a second script tab I inserted the minified JSON parse code from https://github.com/douglascrockford/JSON-js along with the getFromMap function:
function getFromMap(key, jsonRow){
  try{
    var map = JSON.parse(jsonRow);
  }
  catch(e){
    var message = "Unparsable JSON: "+jsonRow+" Desc: "+e.message;
    var nr_errors = 1;
    var field = "jsonRow";
    var errcode = "JSON_PARSE";
    _step_.putError(getInputRowMeta(), row, nr_errors, message, field, errcode);
    trans_Status = SKIP_TRANSFORMATION;
    return null;
  }
  if(map[key] == undefined){
    return null;
  }
  trans_Status = CONTINUE_TRANSFORMATION;
  return map[key];
}
You can solve this by changing the JSONPath and splitting the work across two JSON Input steps. The following website explains a lot about JSONPath: http://goessner.net/articles/JsonPath/
$..AddressId
does in fact return all the AddressIds in the address array, BUT since Pentaho uses grid rows for input and output [4 rows x 3 columns], it can't handle missing (null) values: asking for all the Streets returns 3 rows and for all the Localities returns 2 rows, simply because there are no null values in the array itself; you can't drive out of your garage with 3 wheels on your car instead of the usual 4.
I guess your script returns null values (the X's below) like:
A S X
A S X
A S L
A X L
The scripting step can be avoided altogether by changing the Fields path of the first JSON Input step to:
$.address[*]
This retrieves all 4 address lines. Then create a second JSON Input step based on the new source field, which contains the address line(s), to retrieve the address details per line:
$.AddressId
$.Street
$.Locality
This yields null values on the four address lines when an address detail is not available in a line.