Google Cloud Functions - How to set up a function (trading bot)

I would like to set up a trading bot via Google Cloud to run around the clock.
In Google Cloud Functions I use the Inline editor with runtime Python 3.7.
I have two questions:
1) Main.py section: Here I copied the full code of my Python script (Trading Bot) - see code below for reference (which works well when run as a script in my IDE Spyder).
However, Google asks me to provide a function to execute, and my code is just a script with no main function. Can I simply put, e.g., "def trading_bot(self):" at the top of the code and indent the rest below it?
While the code works well as a script, when I add "def trading_bot(self):" at the top in my IDE (Spyder), the code no longer seems to work properly. How can I make sure the code within the function runs properly when I call the function from Google Cloud (or from my IDE)?
2) Requirements.txt section: Can you provide guidance on what exactly I need to put there? Can I look up the dependencies used in my code somewhere? I use Anaconda as my distribution; the modules imported by the script are listed at the top of the script provided below.
Thanks for any help. I would also be glad for your advice if you think Google Cloud Functions is not the best approach for running a trading bot, but it seemed to me to be the simplest solution.
import bitmex
import json
from time import sleep
from bitmex_websocket import BitMEXWebsocket
import logging, time, requests
import numpy as np
import pandas as pd
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from datetime import datetime
import math
from statistics import mean
#-------------------------
#variable
symbol = "XBTUSD"
#standard API connection
api_key = "XXX"
api_secret = "XXX"
#True for testnet
client = bitmex.bitmex(test=False, api_key=api_key, api_secret=api_secret)
#------------------
# Trading algorithm
symbol = "XBTUSD"
ordType = 'Stop'
#starting order quantity
orderQty = 1
leftBars = 6
rightBars = 2
#round to 0.5
def round_to_BTC(n):
    result = round(n*2)/2
    return result

t = 1
while t < 1000000:
    time_now = (time.strftime('%H:%M:%S', time.localtime(int(time.time()))))
    t_now = time_now[6:8]
    t1 = "00"
    t2 = "59"
    FMT = '%S'
    def days_hours_minutes_seconds(td):
        return td.days, td.seconds//3600, (td.seconds//60)%60, td.seconds
    if t_now == str('00'):
        #give 1 second to candlestick to properly close
        sleep(1)
    elif t_now > str('00') and t_now <= str('59'):
        s1 = datetime.strptime(t2, FMT) - datetime.strptime(t_now, FMT)
        s1_seconds = days_hours_minutes_seconds(s1)[3]+2
        sleep(s1_seconds)
    else:
        pass
    time_now = (time.strftime('%H:%M:%S', time.localtime(int(time.time()))))
    print("The time is now: " + time_now)
    #most recent swing candles, get highs and lows / #binsizes = {"1m": 1, "5m": 5, "1h": 60, "1d": 1440}
    #+1 is the middle bar
    totalBars = leftBars + rightBars + 1
    swing_candles = client.Trade.Trade_getBucketed(symbol=symbol, binSize="1m", count=totalBars, reverse=True).result()[0]
    last_highs = []
    last_lows = []
    i = 0
    while i <= (len(swing_candles)-1):
        last_highs.append(swing_candles[i]["high"])
        last_lows.append(swing_candles[i]["low"])
        i += 1
    #get the highest high and the lowest low
    highest_high = max(last_highs)
    lowest_low = min(last_lows)
    #check if there are existing positions & orders
    if client.Position.Position_get().result()[0] != []:
        positions_quantity = client.Position.Position_get().result()[0][0]["currentQty"]
    else:
        positions_quantity = 0
    #check existing orders
    buy_orders_quantity = []
    sell_orders_quantity = []
    orders_quantity = client.Order.Order_getOrders(filter=json.dumps({"open": True})).result()[0]
    h = 0
    while h <= len(orders_quantity)-1:
        if orders_quantity[h]["side"] == "Sell":
            sell_orders_quantity.append(orders_quantity[h])
        elif orders_quantity[h]["side"] == "Buy":
            buy_orders_quantity.append(orders_quantity[h])
        h += 1
    if highest_high == last_highs[rightBars] and positions_quantity == 0:
        if buy_orders_quantity == []:
            client.Order.Order_new(symbol=symbol, orderQty=orderQty*1, side="Buy", ordType='Stop', stopPx=(highest_high+0.5), execInst='LastPrice').result()
        elif buy_orders_quantity != []:
            orderID = buy_orders_quantity[0]["orderID"]
            client.Order.Order_amend(orderID=orderID, orderQty=orderQty*1, stopPx=(highest_high+0.5)).result()
        else:
            pass
    elif highest_high == last_highs[rightBars] and positions_quantity > 0:
        #dont place any additional long
        pass
    elif highest_high == last_highs[rightBars] and positions_quantity < 0:
        if buy_orders_quantity != []:
            orderID = buy_orders_quantity[0]["orderID"]
            client.Order.Order_amend(orderID=orderID, orderQty=orderQty*2, stopPx=(highest_high+0.5)).result()
        else:
            client.Order.Order_new(symbol=symbol, orderQty=orderQty*2, side="Buy", ordType='Stop', stopPx=(highest_high+0.5), execInst='LastPrice').result()
    elif lowest_low == last_lows[rightBars] and positions_quantity == 0:
        if sell_orders_quantity == []:
            client.Order.Order_new(symbol=symbol, orderQty=orderQty*-1, side="Sell", ordType='Stop', stopPx=(lowest_low-0.5), execInst='LastPrice').result()
        elif sell_orders_quantity != []:
            orderID = sell_orders_quantity[0]["orderID"]
            client.Order.Order_amend(orderID=orderID, orderQty=orderQty*-1, stopPx=(lowest_low-0.5)).result()
        else:
            pass
    elif lowest_low == last_lows[rightBars] and positions_quantity < 0:
        #dont place any additional shorts
        pass
    elif lowest_low == last_lows[rightBars] and positions_quantity > 0:
        if sell_orders_quantity != []:
            orderID = sell_orders_quantity[0]["orderID"]
            client.Order.Order_amend(orderID=orderID, orderQty=orderQty*-2, stopPx=(lowest_low-0.5)).result()
        else:
            client.Order.Order_new(symbol=symbol, orderQty=orderQty*-2, side="Sell", ordType='Stop', stopPx=(lowest_low-0.5), execInst='LastPrice').result()
    positions_quantity = client.Position.Position_get().result()[0][0]["currentQty"]
    buy_orders_quantity = []
    sell_orders_quantity = []
    orders_quantity = client.Order.Order_getOrders(filter=json.dumps({"open": True})).result()[0]
    h = 0
    while h <= len(orders_quantity)-1:
        if orders_quantity[h]["side"] == "Sell":
            sell_orders_quantity.append(orders_quantity[h])
        elif orders_quantity[h]["side"] == "Buy":
            buy_orders_quantity.append(orders_quantity[h])
        h += 1
    if positions_quantity > 0:
        if sell_orders_quantity != []:
            orderID = sell_orders_quantity[0]["orderID"]
            client.Order.Order_amend(orderID=orderID, orderQty=orderQty*-2).result()
    elif positions_quantity < 0:
        if buy_orders_quantity != []:
            orderID = buy_orders_quantity[0]["orderID"]
            client.Order.Order_amend(orderID=orderID, orderQty=orderQty*2).result()
    print("Your current position is " + str(positions_quantity))
    print("This is iteration: " + str(t))
    t += 1

Regarding my second question, I solved it in the following way:
In the command terminal, type: pip freeze > requirements.txt
The resulting file contains all dependencies.
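Note that pip freeze in a full Anaconda environment lists far more packages than the bot needs. A leaner alternative is to list only the third-party packages the script imports (standard-library modules such as json, time, math, statistics and datetime do not belong there). A minimal, unpinned sketch, assuming these PyPI names match the imports at the top of the script:
bitmex
bitmex-ws
requests
numpy
pandas
matplotlib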
Regarding question 1, I still don't understand what code exactly needs to go into the main.py section.
Thanks!

Cloud Functions is not an adequate product for your use case. It is mostly intended for lightweight computations and methods that do not consume many resources.
The magic of Cloud Functions is that your code is executed whenever the URL that belongs to the function is hit. This is important to understand for your question number 1: for the function to work, you always need to define a function that accepts a "request" parameter, which carries the information from the HTTP request made when the URL is hit.
You can take a look at this document for reference.
Your function should always start like this:
# import your dependencies here (e.g. from flask)

def my_awesome_function(request):
    # your logic
In this case you would write "my_awesome_function" in the Function to execute textbox.
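For illustration, a minimal sketch of how the script from the question could be shaped (the helper name run_strategy is hypothetical; note there is no self, since this is a plain module-level function, not a method):

def trading_bot(request):
    # imports stay at module level, above this function
    # the body of the script goes here, indented one level
    run_strategy()  # hypothetical helper containing the trading logic
    return "OK"  # an HTTP-triggered function must return a response

Bear in mind that an HTTP-triggered function is expected to finish within its timeout, so a long-running loop like the one in the question does not fit this model.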
You also have to be careful with your resources, as Cloud Functions comes in five sizes that differ in CPU and memory; you can read more about this here.
For this reason, among many others, you should not use Cloud Functions for your bot. I could recommend you use a virtual machine instead, but note that activities related to using the Services for cryptocurrency mining without Google's prior written approval are frowned upon and may result in the deactivation of your product, as stated in the terms of service.

Related

Creating a menu, adding the input to a list or csv file and display

I am trying to create a programme that allows me to add, remove, amend and display details. I have created a general class and an additional class that allow me to hold an empty list and add/remove details.
I am now struggling with my menu, and with how to add further details and display them. Can anyone help, please?
from AircraftInspection_2 import AircraftInspection_2
from RoutineInspection import RoutineInspection

inspection = RoutineInspection()
end_of_add_String = "X to return to main menu\n"

def display_inspection_inventory():
    #if inspection.number_of_aircrafts == 0
    #return
    print(f'{"SN":>3} {"TN":<7} {"AFhrs":<7} {"Zone":<3} {"Corrosion":<3} {"Component":<8} {"Fault":<8} {"Actionable":<5}')
    for count, aircraft in enumerate(inspection.aircrafts):
        print(f'{count + 1:>3} {aircraft.Serial_Number:>3} {aircraft.Tail_No:<7} {aircraft.AF_hrs:<7} {aircraft.Zone:<3} {aircraft.Corrosion:<3} {aircraft.Component:<8} {aircraft.Fault:<8} {aircraft.Actionable:<5}')

def menu_options():
    print("Menu: ")
    print("Key A - Add details.")
    print("Key R - Remove details.")
    print("Key C - Change details.")
    print("Key D - Display details.")
    print("Key X - Exit the programme")
    print(" ")

menu_options()

def menu_choice():
    letters = {"a", "r", "c", "d", "x"}
    select = input()
    while any(letter in letters for letter in select) and len(select) == 1:
        pass
    return select
#menu_choice()

def add_aircraft():
    aircraft = get_aircraft_details()
    try:
        routineinspection.add(aircraft)
    except ValueError as e:
        print(f"{str(e)}")

def get_aircraft_details():
    Tail_No = input("Please enter tailnumber: ")
    AF_hours = int(input("Enter airframe hours: "))
    Zone = input("Enter inspected zone: ")
    Corrosion = int(input("Enter corrosion level: "))
    Component = input("Enter inspected component: ")
    Fault = input("Enter type of fault: ")
    Actionable = bool(input("Enter 1 if actionable, 0 if not: "))
    aircraft = Aircraft(Tail_No=Tail_No, AF_hours=AF_hours, Zone=Zone, Corrosion=Corrosion, Component=Component, Fault=Fault, Actionable=Actionable)
    return aircraft

def remove_aircraft():
    routineinspection.remove(aircraft)

while True:
    display_inspection_inventory()
    menu_options()
    menu_choice()
    option = menu_choice().upper()
    if option == "A":
        add_aircraft()
    elif option == "R":
        remove_aircraft()
    #elif option =="C":
        #amend_aircraft()
    elif option == "X":
        confirm = input("Do you want to quit the programme, y or n?").lower()
        if confirm == "y":
            break
        else:
            pass
print("See you again soon.")

Problem with PettingZoo and Stable-Baselines3 with a ParallelEnv

I am having trouble making things work with a custom ParallelEnv I wrote using PettingZoo. I am using SuperSuit's ss.pettingzoo_env_to_vec_env_v1(env) as a wrapper to vectorize the environment and make it work with Stable-Baselines3, as documented here.
Below is a summary of the most relevant parts of the code:
from typing import Optional
from gym import spaces
import random
import numpy as np
from pettingzoo import ParallelEnv
from pettingzoo.utils.conversions import parallel_wrapper_fn
import supersuit as ss
from gym.utils import EzPickle, seeding
def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    #env_ = ss.concat_vec_envs_v1(env_, 1)
    return env_

petting_zoo = env

class parallel_env(ParallelEnv, EzPickle):
    metadata = {'render_modes': ['ansi'], "name": "PlayerEnv-Multi-v0"}

    def __init__(self, n_agents: int = 20, new_step_api: bool = True) -> None:
        EzPickle.__init__(
            self,
            n_agents,
            new_step_api
        )
        self._episode_ended = False
        self.n_agents = n_agents
        self.possible_agents = [
            f"player_{idx}" for idx in range(n_agents)]
        self.agents = self.possible_agents[:]
        self.agent_name_mapping = dict(
            zip(self.possible_agents, list(range(len(self.possible_agents))))
        )
        self.observation_spaces = spaces.Dict(
            {agent: spaces.Box(shape=(len(self.agents),),
                               dtype=np.float64, low=0.0, high=1.0) for agent in self.possible_agents}
        )
        self.action_spaces = spaces.Dict(
            {agent: spaces.Discrete(4) for agent in self.possible_agents}
        )
        self.current_step = 0

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)

    def observation_space(self, agent):
        return self.observation_spaces[agent]

    def action_space(self, agent):
        return self.action_spaces[agent]

    def __calculate_observation(self, agent_id: int) -> np.ndarray:
        return self.observation_space(agent_id).sample()

    def __calculate_observations(self) -> np.ndarray:
        observations = {
            agent: self.__calculate_observation(
                agent_id=agent)
            for agent in self.agents
        }
        return observations

    def observe(self, agent):
        return self.__calculate_observation(agent_id=agent)

    def step(self, actions):
        if self._episode_ended:
            return self.reset()
        observations = self.__calculate_observations()
        rewards = random.sample(range(100), self.n_agents)
        self.current_step += 1
        self._episode_ended = self.current_step >= 100
        infos = {agent: {} for agent in self.agents}
        dones = {agent: self._episode_ended for agent in self.agents}
        rewards = {
            self.agents[i]: rewards[i]
            for i in range(len(self.agents))
        }
        if self._episode_ended:
            self.agents = {}  # To satisfy `set(par_env.agents) == live_agents`
        return observations, rewards, dones, infos

    def reset(self,
              seed: Optional[int] = None,
              return_info: bool = False,
              options: Optional[dict] = None,):
        self.agents = self.possible_agents[:]
        self._episode_ended = False
        self.current_step = 0
        observations = self.__calculate_observations()
        return observations

    def render(self, mode="human"):
        # TODO: IMPLEMENT
        print("TO BE IMPLEMENTED")

    def close(self):
        pass
Unfortunately when I try to test with the following main procedure:
from stable_baselines3 import DQN, PPO
from stable_baselines3.common.env_checker import check_env
from dummy_env import dummy
from pettingzoo.test import parallel_api_test
if __name__ == '__main__':
    # Testing the parallel algorithm alone
    env_parallel = dummy.parallel_env()
    parallel_api_test(env_parallel)  # This works!
    # Testing the environment with the wrapper
    env = dummy.petting_zoo()
    # ERROR: AssertionError: The observation returned by the `reset()` method does not match the given observation space
    check_env(env)
    # Model initialization
    model = PPO("MlpPolicy", env, verbose=1)
    # ERROR: ValueError: could not broadcast input array from shape (20,20) into shape (20,)
    model.learn(total_timesteps=10_000)
I get the following error:
AssertionError: The observation returned by the `reset()` method does not match the given observation space
If I skip check_env() I get the following one:
ValueError: could not broadcast input array from shape (20,20) into shape (20,)
It seems that ss.pettingzoo_env_to_vec_env_v1(env) is capable of splitting the parallel environment into multiple vectorized ones, but that this does not carry over to the reset() function.
Does anyone know how to fix this problem?
Please see the GitHub repository to reproduce the problem.
You should double-check the reset() function in PettingZoo: it will return None instead of an observation as in Gym.
Thanks to a discussion I had in the issue section of the SuperSuit repository, I am able to post the solution to the problem. Thanks to jjshoots!
First of all, it is necessary to have the latest SuperSuit version. To get that, I needed to install Stable-Baselines3 using the instructions here to make it work with gym 0.24+.
After that, taking the code in the question as example, it is necessary to substitute
def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    #env_ = ss.concat_vec_envs_v1(env_, 1)
    return env_
with
def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    env_ = ss.concat_vec_envs_v1(env_, 1, base_class="stable_baselines3")
    return env_
The outcomes are:
Outcome 1: leaving the line with check_env(env) I got an error AssertionError: Your environment must inherit from the gym.Env class cf https://github.com/openai/gym/blob/master/gym/core.py
Outcome 2: removing the line with check_env(env), the agent starts training successfully!
In the end, I think that the argument base_class="stable_baselines3" made the difference.
Only the small problem with check_env remains to be reported, but I think it can be considered trivial as long as the training works.
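For completeness, a minimal sketch of the resulting training entry point (same module layout as in the question, with hyperparameters left at their defaults):

from stable_baselines3 import PPO
from dummy_env import dummy

env = dummy.petting_zoo()  # the wrapper now uses concat_vec_envs_v1(..., base_class="stable_baselines3")
model = PPO("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=10_000)  # trains without the broadcast error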

Is Caffe's reported accuracy reliable?

I recently tried to get the confusion matrix for one of my trained models, to see how precise it is. I downloaded this script and fed it my model. To my astonishment, the accuracy calculated by the script is very different from the one Caffe reports.
I used this script to calculate the confusion matrix; it reports the accuracy as well, and the problem is that the accuracy reported by this script is way different from the one reported by Caffe! For example, Caffe reports the accuracy on, let's say, CIFAR10 as 92.34%, while when the model is fed to the script to calculate the confusion matrix and its accuracy, the result is something like 86.5%!
Which of these accuracies is the correct one that can be reported in papers or compared with the results of other papers, such as those here?
I also saw something weird again: I trained two identical models with only one difference, namely one used Xavier and the other used MSRA for initialization. In Caffe, the first one reports an accuracy of 94.25% and the other reports 94.26%. When these models are fed to the script linked above for the confusion matrix computation, their accuracies were 89.2% and 87.4% respectively!
Is this normal? What is the cause of this? MSRA?
Are the accuracies reported by Caffe true and reliable?
PS: The accuracy in the script is calculated as (complete script):
for i, image, label in reader:
    image_caffe = image.reshape(1, *image.shape)
    out = net.forward_all(data=np.asarray([ image_caffe ]))
    plabel = int(out['prob'][0].argmax(axis=0))
    count += 1
    iscorrect = label == plabel
    correct += (1 if iscorrect else 0)
    matrix[(label, plabel)] += 1
    labels_set.update([label, plabel])
    if not iscorrect:
        print("\rError: i=%s, expected %i but predicted %i" \
              % (i, label, plabel))
    sys.stdout.write("\rAccuracy: %.1f%%" % (100.*correct/count))
    sys.stdout.flush()
print(", %i/%i corrects" % (correct, count))
This, imho, is OK and correct: the number of correct predictions divided by the total number of instances in the dataset.
I found the reason.
The mismatch between the accuracy reported by Caffe and the accuracy computed by the script in question was solely due to mean subtraction, which was done in Caffe but not in the script.
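The essential change is a single line in the evaluation loop: subtract the mean image from the input before the forward pass, mirroring what Caffe does. Here mean_image is the array loaded from the Caffe mean file, as in the full script below:

# before (no mean subtraction):
out = net.forward_all(data=np.asarray([ image_caffe ]))
# after (mean subtracted, as Caffe does):
out = net.forward_all(data=np.asarray([ image_caffe ]) - mean_image)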
This is the modified version of the script, which takes this into account; hopefully everything is now just fine.
# Author: Axel Angel, copyright 2015, license GPLv3.
# added mean subtraction so that, the accuracy can be reported accurately just like caffe when doing a mean subtraction
# Seyyed Hossein Hasan Pour
# Coderx7#Gmail.com
# 7/3/2016

import sys
import caffe
import numpy as np
import lmdb
import argparse
from collections import defaultdict

def flat_shape(x):
    "Returns x without singleton dimension, eg: (1,28,28) -> (28,28)"
    return x.reshape(filter(lambda s: s > 1, x.shape))

def lmdb_reader(fpath):
    import lmdb
    lmdb_env = lmdb.open(fpath)
    lmdb_txn = lmdb_env.begin()
    lmdb_cursor = lmdb_txn.cursor()
    for key, value in lmdb_cursor:
        datum = caffe.proto.caffe_pb2.Datum()
        datum.ParseFromString(value)
        label = int(datum.label)
        image = caffe.io.datum_to_array(datum).astype(np.uint8)
        yield (key, flat_shape(image), label)

def leveldb_reader(fpath):
    import leveldb
    db = leveldb.LevelDB(fpath)
    for key, value in db.RangeIter():
        datum = caffe.proto.caffe_pb2.Datum()
        datum.ParseFromString(value)
        label = int(datum.label)
        image = caffe.io.datum_to_array(datum).astype(np.uint8)
        yield (key, flat_shape(image), label)

def npz_reader(fpath):
    npz = np.load(fpath)
    xs = npz['arr_0']
    ls = npz['arr_1']
    for i, (x, l) in enumerate(np.array([ xs, ls ]).T):
        yield (i, x, l)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--proto', type=str, required=True)
    parser.add_argument('--model', type=str, required=True)
    parser.add_argument('--mean', type=str, required=True)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--lmdb', type=str, default=None)
    group.add_argument('--leveldb', type=str, default=None)
    group.add_argument('--npz', type=str, default=None)
    args = parser.parse_args()

    # Extract mean from the mean image file
    mean_blobproto_new = caffe.proto.caffe_pb2.BlobProto()
    f = open(args.mean, 'rb')
    mean_blobproto_new.ParseFromString(f.read())
    mean_image = caffe.io.blobproto_to_array(mean_blobproto_new)
    f.close()

    count = 0
    correct = 0
    matrix = defaultdict(int) # (real,pred) -> int
    labels_set = set()

    # CNN reconstruction and loading the trained weights
    net = caffe.Net(args.proto, args.model, caffe.TEST)
    caffe.set_mode_cpu()
    print "args", vars(args)

    if args.lmdb != None:
        reader = lmdb_reader(args.lmdb)
    if args.leveldb != None:
        reader = leveldb_reader(args.leveldb)
    if args.npz != None:
        reader = npz_reader(args.npz)

    for i, image, label in reader:
        image_caffe = image.reshape(1, *image.shape)
        out = net.forward_all(data=np.asarray([ image_caffe ]) - mean_image)
        plabel = int(out['prob'][0].argmax(axis=0))
        count += 1
        iscorrect = label == plabel
        correct += (1 if iscorrect else 0)
        matrix[(label, plabel)] += 1
        labels_set.update([label, plabel])
        if not iscorrect:
            print("\rError: i=%s, expected %i but predicted %i" \
                  % (i, label, plabel))
        sys.stdout.write("\rAccuracy: %.1f%%" % (100.*correct/count))
        sys.stdout.flush()

    print(", %i/%i corrects" % (correct, count))
    print ""
    print "Confusion matrix:"
    print "(r , p) | count"
    for l in labels_set:
        for pl in labels_set:
            print "(%i , %i) | %i" % (l, pl, matrix[(l,pl)])

Reading binary .SAVE files?

How can I open or read a binary file that has been saved in Octave with the extension .SAVE? I have tried opening it with MATLAB and using the 'load' function in Octave, but nothing seems to work. I'm trying to understand someone else's code, and they have saved the output of a simulation in this file.
The Octave binary format is briefly described in the comments before the function read_binary_data() in load-save.cc.
Are you sure the file is in "Octave binary format"? The file ending ".SAVE" can be chosen arbitrarily, so this could also be CSV, gzipped, etc.
You can run "file yourfile.SAVE" and paste the output, or check whether the first bytes of your file are "Octave-1-L" or "Octave-1-B".
If you want to use these files from a program other than GNU Octave, I would suggest loading them in Octave and saving them in another format. See "help save" for a list of supported formats.
EDIT:
Because the original poster asked: of course you can use GNU Octave from the terminal (no need for the GUI; I don't know which software part you are referring to with the phrase "octave GUI", see the Octave FAQ). Just install it for your platform (install instructions are on wiki.octave.org) and run it.
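For illustration, the magic-byte check suggested above as a minimal Python sketch (the filename is a placeholder):

# Read the first 10 bytes and compare them with the Octave binary magic strings.
with open("yourfile.SAVE", "rb") as fd:
    magic = fd.read(10)
print(magic in (b"Octave-1-L", b"Octave-1-B"))  # True if Octave binary format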
Code for reading octave binary save files in python 2/3.
Tested on:
strings
single and double precision real and complex floats
various integer types
scalar, matrix and array
Unsupported:
struct
cell array
...
Python code:
# This code is public domain

from __future__ import print_function

import sys
from collections import OrderedDict

import numpy as np

if sys.version_info[0] > 2:
    def tostr(s):
        return s.decode('utf8')

    def decode(s, encoding='utf8'):
        return s.decode(encoding)

    STR_ENCODING = 'utf8'
else:
    def tostr(s):
        return s

    def decode(s, encoding='utf8'):
        return unicode(s, encoding)

    STR_ENCODING = None

DATA_TYPES = {
    1: "scalar",
    2: "matrix",
    3: "complex scalar",
    4: "complex matrix",
    5: "old_string",
    6: "range",
    7: "string",
}

TYPE_CODES = {
    0: "u1",
    1: "u2",
    2: "u4",
    3: "i1",
    4: "i2",
    5: "i4",
    6: "f4",
    7: "f8",
    8: "u8",
    9: "i8",
}

DTYPES = {k: np.dtype(v) for k, v in TYPE_CODES.items()}

def loadoct(fd, encoding=STR_ENCODING):
    """
    Read an octave binary file from the file handle fd, returning
    an array of structures. If encoding is not None then convert
    strings from bytes to unicode. Default is STR_ENCODING, which
    is utf8 for python 3 and None for python 2, yielding arrays
    of type str in each dialect.
    """
    magic = fd.read(10)
    assert(magic == b"Octave-1-L" or magic == b"Octave-1-B")
    endian = "<" if magic[-1:] == b"L" else ">"
    # Float type is 0: IEEE-LE, 1: IEEE-BE, 2: VAX-D, 3: VAX-G, 4: Cray
    # Not used since Octave assumes IEEE format floats.
    _float_format = fd.read(1)
    len_dtype = np.dtype(endian + "i4")

    def read_len():
        len_bytes = fd.read(4)
        if not len_bytes:
            return None
        return np.frombuffer(len_bytes, len_dtype)[0]

    table = OrderedDict()
    while True:
        name_length = read_len()
        if name_length is None:  # EOF
            break
        name = tostr(fd.read(name_length))
        doc_length = read_len()
        doc = tostr(fd.read(doc_length)) if doc_length else ''
        is_global = bool(ord(fd.read(1)))
        data_type = ord(fd.read(1))
        if data_type == 255:
            type_str = tostr(fd.read(read_len()))
        else:
            type_str = DATA_TYPES[data_type]
        #print("reading", name, type_str)
        if type_str.endswith("scalar"):
            if type_str == "scalar":
                dtype = DTYPES[ord(fd.read(1))]
            elif type_str == "complex scalar":
                _ = fd.read(1)
                dtype = np.dtype('complex128')
            elif type_str == "float complex scalar":
                _ = fd.read(1)
                dtype = np.dtype('complex64')
            else:
                dtype = np.dtype(type_str[:-7])
            dtype = dtype.newbyteorder(endian)
            data = np.frombuffer(fd.read(dtype.itemsize), dtype)
            table[name] = data[0]
        elif type_str.endswith("matrix"):
            ndims = read_len()
            if ndims < 0:
                ndims = -ndims
                dims = np.frombuffer(fd.read(4*ndims), len_dtype)
            else:
                dims = (ndims, read_len())
            count = np.prod(dims)
            if type_str == "matrix":
                dtype = DTYPES[ord(fd.read(1))]
            elif type_str == "complex matrix":
                _ = fd.read(1)
                dtype = np.dtype('complex128')
            elif type_str == "float complex matrix":
                _ = fd.read(1)
                dtype = np.dtype('complex64')
            else:
                dtype = np.dtype(type_str[:-7])
            dtype = dtype.newbyteorder(endian)
            data = np.frombuffer(fd.read(count*dtype.itemsize), dtype)
            # Note: Use data.copy() to make a modifiable array.
            table[name] = data.reshape(dims, order='F')
        elif type_str == "old_string":
            data = fd.read(read_len())
            if encoding is not None:
                data = decode(data, encoding)
            table[name] = data
        elif type_str in ("string", "sq_string"):
            nrows = read_len()
            if nrows < 0:
                ndims = -nrows
                dims = np.frombuffer(fd.read(4*ndims), len_dtype)
                count = np.prod(dims)
                fortran_order = np.frombuffer(fd.read(count), dtype='uint8')
                c_order = np.ascontiguousarray(fortran_order.reshape(dims, order='F'))
                data = c_order.view(dtype='|S'+str(dims[-1]))
                if encoding is not None:
                    data = np.array([decode(s, encoding) for s in data.flat])
                table[name] = data.reshape(dims[:-1])
            else:
                data = [fd.read(read_len()) for _ in range(nrows)]
                if encoding is not None:
                    data = [decode(s, encoding) for s in data]
                table[name] = np.array(data)
        else:
            raise NotImplementedError("unknown octave type " + type_str)
        #print("read %s:%s"%(name, type_str), table[name])
    return table

def _dump(filename, encoding=STR_ENCODING):
    import gzip
    if filename.endswith('.gz'):
        with gzip.open(filename, 'rb') as fd:
            table = loadoct(fd, encoding)
    else:
        with open(filename, 'rb') as fd:
            table = loadoct(fd, encoding)
    for k, v in table.items():
        print(k, v)

if __name__ == "__main__":
    #_dump(sys.argv[1], encoding='utf8')  # unicode
    #_dump(sys.argv[1], encoding=None)    # bytes
    _dump(sys.argv[1])  # str, encoding=STR_ENCODING

Easier way to create a JSON object from an SQLObject

EDIT -- took the code from below and made it handle ForeignKeys and Decimal numbers (although I'm doing a very forced float conversion). It returns a dict now so it can be recursive.
from sqlobject import SQLObject
from decimal import Decimal

def sqlobject_to_dict(obj):
    json_dict = {}
    cls_name = type(obj)
    for attr in vars(cls_name):
        if isinstance(getattr(cls_name, attr), property):
            attr_value = getattr(obj, attr)
            attr_class = type(attr_value)
            attr_parent = attr_class.__bases__[0]
            if isinstance(getattr(obj, attr), Decimal):
                json_dict[attr] = float(getattr(obj, attr))
            elif attr_parent == SQLObject:
                json_dict[attr] = sqlobject_to_dict(getattr(obj, attr))
            else:
                json_dict[attr] = getattr(obj, attr)
    return json_dict
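For example, it can be combined with the json module like this (a sketch; the id passed to get() assumes such a row exists in the database):

import json

style = BJCPStyle.get(1)  # fetch one row by id
print(json.dumps(sqlobject_to_dict(style)))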
EDIT -- changed to add the actual data model -- there are generated values that need to be accessed and Decimal() columns that need dealing with as well.
So I've seen this: return SQL table as JSON in python but it's not really what I'm looking for -- that's "brute force" -- you need to know the names of the attributes of the object in order to generate the JSON response.
What I'd like to do is something like this (the name of the class and its attributes are not important):
class BJCPStyle(SQLObject):
    name = UnicodeCol(length=128, default=None)
    beer_type = UnicodeCol(length=5, default=None)
    category = ForeignKey('BJCPCategory')
    subcategory = UnicodeCol(length=1, default=None)
    aroma = UnicodeCol(default=None)
    appearance = UnicodeCol(default=None)
    flavor = UnicodeCol(default=None)
    mouthfeel = UnicodeCol(default=None)
    impression = UnicodeCol(default=None)
    comments = UnicodeCol(default=None)
    examples = UnicodeCol(default=None)
    og_low = SGCol(default=None)
    og_high = SGCol(default=None)
    fg_low = SGCol(default=None)
    fg_high = SGCol(default=None)
    ibu_low = IBUCol(default=None)
    ibu_high = IBUCol(default=None)
    srm_low = SRMCol(default=None)
    srm_high = SRMCol(default=None)
    abv_low = DecimalCol(size=3, precision=1, default=None)
    abv_high = DecimalCol(size=3, precision=1, default=None)
    versions = Versioning()

    def _get_combined_category_id(self):
        return "%s%s" % (self.category.category_id, self.subcategory)

    def _get_og_range(self):
        low = self._SO_get_og_low()
        high = self._SO_get_og_high()
        if low == 0 and high == 0:
            return "varies"
        else:
            return "%.3f - %.3f" % (low, high)

    def _get_fg_range(self):
        low = self._SO_get_fg_low()
        high = self._SO_get_fg_high()
        if low == 0 and high == 0:
            return "varies"
        else:
            return "%.3f - %.3f" % (low, high)

    def _get_srm_range(self):
        low = self._SO_get_srm_low()
        high = self._SO_get_srm_high()
        if low == 0 and high == 0:
            return "varies"
        else:
            return "%.1f - %.1f" % (low, high)

    def _get_abv_range(self):
        low = self._SO_get_abv_low()
        high = self._SO_get_abv_high()
        if low == 0 and high == 0:
            return "varies"
        else:
            return "%.2f%% - %.2f%%" % (low, high)

    def _get_ibu_range(self):
        low = self._SO_get_ibu_low()
        high = self._SO_get_ibu_high()
        if low == 0 and high == 0:
            return "varies"
        else:
            return "%i - %i" % (low, high)
Is there an easy, pythonic way to write that magic to_json() function?
You can use the python json module with the SQLObject sqlmeta class. Like this:
def to_json(obj):
    return json.dumps(dict((c, getattr(obj, c)) for c in obj.sqlmeta.columns))
When I run this with your class Foo I get:
>>> print to_json(f)
{"bar": "test", "lulz": "only for the", "baz": true}
Edit: if you want to include magic attributes in your json string and you don't mind using something of a hack, you could abuse the fact that the attributes of your object are python properties. For example, if I add a magic attribute foo to your original sample class:
class Foo(SQLObject):
    bar = UnicodeCol(length=128)
    baz = BoolCol(default=True)
    lulz = UnicodeCol(length=256)

    def _get_foo(self):
        return "foo"
Then I can define the to_json() function like this:
def to_json(obj):
    cls = type(obj)
    d = dict((c, getattr(obj, c)) for c in vars(cls) if isinstance(getattr(cls, c), property))
    return json.dumps(d)
Now, if I do this:
f = Foo(bar = "test", lulz = "only for the")
print to_json(f)
I get the following result:
{"baz": true, "lulz": "only for the", "bar": "test", "foo": "foo"}
import json
json.dumps(obj_instance.sqlmeta.asDict())
In my case this object contained datetimes, which json doesn't serialize, so I did something like this:
json.dumps(dict((k, str(v)) for (k,v) in obj_instance.sqlmeta.asDict().items()))
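A shorter variant with the same effect is to let json.dumps stringify anything it cannot serialize via its default hook (a sketch, not from the original answer):

json.dumps(obj_instance.sqlmeta.asDict(), default=str)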
Something like this ...
class MyTable(sqlobject.SQLObject):
    # ... your columns ...

json.dumps({
    'MyTable': [row.sqlmeta.asDict() for row in MyTable.select()]
}, indent=4, sort_keys=True)
Suppose you have a list of sqlobject.SQLObject derived classes called 'Tables':
Tables = [MyTable, ...]

def dump():
    r = {}
    for t in Tables:
        r[t.__name__] = [row.sqlmeta.asDict() for row in t.select()]
    return json.dumps(r, indent=4, sort_keys=True)