Is it possible to mark a column as do-not-pickle in SQLAlchemy, such that it will get loaded from the database on demand after it is unpickled instead?
This can be partially achieved with a deferred column, but if the column is subsequently loaded it will also then get pickled.
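For context, a minimal sketch of what that deferred-column setup looks like (model and column names here are illustrative; once the deferred column has been loaded, it sits in the instance __dict__ and would be pickled along with everything else):
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import deferred
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Record(Base):
    __tablename__ = "record"

    id = Column(Integer, primary_key=True)
    # Not loaded by the initial query; fetched on first attribute access.
    payload = deferred(Column(String))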
(I'm asking because it looks like there's still a problem in Python 2.7 with pickling GeoAlchemy2 Geometry columns, due to the unpicklable nature of the built-in buffer type: https://github.com/geoalchemy/geoalchemy/issues/24 )
This is a little bit involved, because you can't just prevent pickling of a column: what controls deferred loading actually lives in the _sa_instance_state attribute, so even if you omit an attribute from your pickled instance dictionary, SQLAlchemy does not know that this means it is expired.
A (slightly hacky) solution is to expire the attribute when you unpickle:
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Foo(Base):
    __tablename__ = "foo"

    id = Column(Integer, primary_key=True)
    foo = Column(String)
    bar = Column(String)

    def __getstate__(self):
        # Leave "bar" out of the pickled state entirely.
        return {k: v for k, v in self.__dict__.items() if k != "bar"}

    def __setstate__(self, d):
        for k, v in d.items():
            self.__dict__[k] = v
        # _expire_attributes is a private API; this marks "bar" as expired
        # so the next access reloads it from the database.
        self._sa_instance_state._expire_attributes(self.__dict__, ("bar",))
Note that the result of unpickling is a detached instance, so you need to be careful how you use it:
foo = session.query(Foo).first()
foo = pickle.loads(pickle.dumps(foo))
print(foo.foo) # fine
# print(foo.bar) # sqlalchemy.orm.exc.DetachedInstanceError
foo = session.merge(foo, load=False)
print(foo.foo) # no query
print(foo.bar) # causes a query
Here's a naive __getstate__ implementation that doesn't work, but will hopefully prompt the right answer:
Base = declarative_base(metadata=route_metadata)

cols_to_omit = ['my_col_1']

class MyClass(Base):
    my_col_0 = Column(Integer)
    my_col_1 = Column(Integer)

    def __getstate__(self):
        return dict((c, getattr(self, c))
                    for c in self.__table__.columns.keys()
                    if c not in cols_to_omit)

    def __setstate__(self, *args):
        for c, v in args[0].items():
            setattr(self, c, v)

from pickle import *

pp = dumps(MyClass.query.first())
obj = loads(pp)
This gives AttributeError: 'MyClass' object has no attribute '_sa_instance_state', because __getstate__ also drops the _sa_instance_state entry, so the unpickled object comes back without any ORM instrumentation.
I am having trouble making things work with a custom ParallelEnv I wrote using PettingZoo. I am using SuperSuit's ss.pettingzoo_env_to_vec_env_v1(env) as a wrapper to vectorize the environment and make it work with Stable-Baselines3, as documented here.
You can find below a summary of the most relevant parts of the code:
from typing import Optional
from gym import spaces
import random
import numpy as np
from pettingzoo import ParallelEnv
from pettingzoo.utils.conversions import parallel_wrapper_fn
import supersuit as ss
from gym.utils import EzPickle, seeding
def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    #env_ = ss.concat_vec_envs_v1(env_, 1)
    return env_

petting_zoo = env

class parallel_env(ParallelEnv, EzPickle):
    metadata = {'render_modes': ['ansi'], "name": "PlayerEnv-Multi-v0"}

    def __init__(self, n_agents: int = 20, new_step_api: bool = True) -> None:
        EzPickle.__init__(
            self,
            n_agents,
            new_step_api
        )
        self._episode_ended = False
        self.n_agents = n_agents
        self.possible_agents = [
            f"player_{idx}" for idx in range(n_agents)]
        self.agents = self.possible_agents[:]
        self.agent_name_mapping = dict(
            zip(self.possible_agents, list(range(len(self.possible_agents))))
        )
        self.observation_spaces = spaces.Dict(
            {agent: spaces.Box(shape=(len(self.agents),),
                               dtype=np.float64, low=0.0, high=1.0) for agent in self.possible_agents}
        )
        self.action_spaces = spaces.Dict(
            {agent: spaces.Discrete(4) for agent in self.possible_agents}
        )
        self.current_step = 0

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)

    def observation_space(self, agent):
        return self.observation_spaces[agent]

    def action_space(self, agent):
        return self.action_spaces[agent]

    def __calculate_observation(self, agent_id: int) -> np.ndarray:
        return self.observation_space(agent_id).sample()

    def __calculate_observations(self) -> np.ndarray:
        observations = {
            agent: self.__calculate_observation(
                agent_id=agent)
            for agent in self.agents
        }
        return observations

    def observe(self, agent):
        return self.__calculate_observation(agent_id=agent)

    def step(self, actions):
        if self._episode_ended:
            return self.reset()
        observations = self.__calculate_observations()
        rewards = random.sample(range(100), self.n_agents)
        self.current_step += 1
        self._episode_ended = self.current_step >= 100
        infos = {agent: {} for agent in self.agents}
        dones = {agent: self._episode_ended for agent in self.agents}
        rewards = {
            self.agents[i]: rewards[i]
            for i in range(len(self.agents))
        }
        if self._episode_ended:
            self.agents = {}  # To satisfy `set(par_env.agents) == live_agents`
        return observations, rewards, dones, infos

    def reset(self,
              seed: Optional[int] = None,
              return_info: bool = False,
              options: Optional[dict] = None,):
        self.agents = self.possible_agents[:]
        self._episode_ended = False
        self.current_step = 0
        observations = self.__calculate_observations()
        return observations

    def render(self, mode="human"):
        # TODO: IMPLEMENT
        print("TO BE IMPLEMENTED")

    def close(self):
        pass
Unfortunately, when I try to test it with the following main procedure:
from stable_baselines3 import DQN, PPO
from stable_baselines3.common.env_checker import check_env
from dummy_env import dummy
from pettingzoo.test import parallel_api_test

if __name__ == '__main__':
    # Testing the parallel algorithm alone
    env_parallel = dummy.parallel_env()
    parallel_api_test(env_parallel)  # This works!

    # Testing the environment with the wrapper
    env = dummy.petting_zoo()
    # ERROR: AssertionError: The observation returned by the `reset()` method does not match the given observation space
    check_env(env)

    # Model initialization
    model = PPO("MlpPolicy", env, verbose=1)
    # ERROR: ValueError: could not broadcast input array from shape (20,20) into shape (20,)
    model.learn(total_timesteps=10_000)
I get the following error:
AssertionError: The observation returned by the `reset()` method does not match the given observation space
If I skip check_env() I get the following one:
ValueError: could not broadcast input array from shape (20,20) into shape (20,)
It seems that ss.pettingzoo_env_to_vec_env_v1(env) is able to split the parallel environment into multiple vectorized ones, but not for the reset() function.
Does anyone know how to fix this problem?
Please find the GitHub repository to reproduce the problem.
You should double-check the reset() function in PettingZoo. It will return None instead of an observation, unlike Gym.
Thanks to the discussion I had in the issue section of the SuperSuit repository, I am able to post the solution to the problem. Thanks to jjshoots!
First of all, it is necessary to have the latest SuperSuit version. In order to get that, I needed to install Stable-Baselines3 using the instructions here to make it work with gym 0.24+.
After that, taking the code in the question as an example, it is necessary to substitute
def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    #env_ = ss.concat_vec_envs_v1(env_, 1)
    return env_
with
def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    env_ = ss.concat_vec_envs_v1(env_, 1, base_class="stable_baselines3")
    return env_
The outcomes are:
Outcome 1: leaving the line with check_env(env) in, I got the error AssertionError: Your environment must inherit from the gym.Env class (cf. https://github.com/openai/gym/blob/master/gym/core.py)
Outcome 2: removing the line with check_env(env), the agent starts training successfully!
In the end, I think that the argument base_class="stable_baselines3" made the difference.
Only the small problem with check_env remains to be reported, but I think it can be considered trivial as long as the training works.
I am using Marshmallow to dump an instance of my Decision class to JSON. However, this also dumps the attributes which are None; e.g., my attribute score translates to null in JSON. After that, I am unable to read the JSON back in using the same approach.
https://repl.it/repls/VoluminousMulticoloredFacts
The last line is where it currently fails. I need to either NOT dump None to JSON or skip null during loading:
import json
from marshmallow import Schema, fields, post_load

json_data = """{
    "appid": "2309wfjwef",
    "strategy": "First Strategy"
}"""

# Output class definition
class Decision(object):
    def __init__(self, appid=None, strategy=None, score=None):
        self.appid = appid
        self.strategy = strategy
        self.score = score

class DecisionSchema(Schema):
    appid = fields.Str()
    strategy = fields.Str()
    score = fields.Int()

    @post_load
    def make_decision(self, data):
        return Decision(**data)

# Deserialization into object
dec_json = json.loads(json_data)
schema = DecisionSchema()
dec = schema.load(dec_json).data
print(dec.strategy)

# Dump results back to JSON
schema = DecisionSchema()
out = schema.dumps(dec)
print(out.data)

# Load back from dump
schema = DecisionSchema()
dec = schema.load(out).data
#print(dec.strategy)  # returns error currently
An "official" answer from marshmallow development team can be found in this comment in the bugtracker:
Use a post_dump method.
from marshmallow import Schema, fields, post_dump

class BaseSchema(Schema):
    SKIP_VALUES = set([None])

    @post_dump
    def remove_skip_values(self, data, **kwargs):
        return {
            key: value for key, value in data.items()
            if value not in self.SKIP_VALUES
        }

class MySchema(BaseSchema):
    foo = fields.Field()
    bar = fields.Field()

sch = MySchema()
sch.dump({'foo': 42, 'bar': None}).data  # {'foo': 42}
As I point out in a further comment, there's a shortcoming: it will also remove None when the field's allow_none is True.
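A sketch of one way around that shortcoming: only drop None for fields that don't explicitly allow it (this assumes the dumped keys match the schema's field names, i.e. no data_key remapping):
from marshmallow import Schema, fields, post_dump

class BaseSchema(Schema):
    @post_dump
    def remove_none_values(self, data, **kwargs):
        # Keep None only where the field explicitly allows it.
        return {
            key: value for key, value in data.items()
            if value is not None
            or (key in self.fields and self.fields[key].allow_none)
        }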
As I pointed out in my comment above, this messes with the field order if you use:
class Meta:
    fields = (
        'field1', 'field2'
    )
    ordered = True
To fix this, I used this:
# Remove None fields
@pre_dump
def remove_skip_values(self, data):
    return {
        key: value for key, value in data.items()
        if value is not None
    }
This works for my dictionary of objects.
I just discovered the json_normalize function, which works great in taking a JSON object and giving me a pandas DataFrame. Now I want the reverse operation, which takes that same DataFrame and gives me a JSON (or JSON-like dictionary which I can easily turn into JSON) with the same structure as the original JSON.
Here's an example: https://hackersandslackers.com/json-into-pandas-dataframes/.
They take a JSON object (or JSON-like Python dictionary) and turn it into a DataFrame; I now want to take that DataFrame and turn it back into a JSON-like dictionary (to later dump to a JSON file).
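For reference, a quick look at the forward direction: json_normalize flattens nested keys into dot-separated column names, which is exactly the structure the answers below invert:
import pandas as pd

nested = [{"user": {"name": "ann", "address": {"city": "NYC"}}, "score": 1}]
df = pd.json_normalize(nested)
print(sorted(df.columns))  # ['score', 'user.address.city', 'user.name']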
I implemented it with a couple of functions:
def set_for_keys(my_dict, key_arr, val):
    """
    Set val at the path in my_dict defined by the string (or serializable object) array key_arr.
    """
    current = my_dict
    for i in range(len(key_arr)):
        key = key_arr[i]
        if key not in current:
            if i == len(key_arr) - 1:
                current[key] = val
            else:
                current[key] = {}
        else:
            if type(current[key]) is not dict:
                print("Given dictionary is not compatible with key structure requested")
                raise ValueError("Dictionary key already occupied")
        current = current[key]
    return my_dict

def to_formatted_json(df, sep="."):
    result = []
    for _, row in df.iterrows():
        parsed_row = {}
        for idx, val in row.items():
            keys = idx.split(sep)
            parsed_row = set_for_keys(parsed_row, keys, val)
        result.append(parsed_row)
    return result

# Where df was parsed from a json dict using json_normalize
to_formatted_json(df, sep=".")
A simpler approach, using only one function:
def df_to_formatted_json(df, sep="."):
    """
    The opposite of json_normalize.
    """
    result = []
    for idx, row in df.iterrows():
        parsed_row = {}
        for col_label, v in row.items():
            keys = col_label.split(sep)
            current = parsed_row
            for i, k in enumerate(keys):
                if i == len(keys) - 1:
                    current[k] = v
                else:
                    if k not in current.keys():
                        current[k] = {}
                    current = current[k]
        # save
        result.append(parsed_row)
    return result
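A quick round trip with the helper above (a sketch; depending on your pandas/numpy versions, the values may come back as numpy scalars rather than plain ints):
import pandas as pd

df = pd.json_normalize([{"a": {"b": 1}, "c": 2}])
print(df_to_formatted_json(df))  # [{'a': {'b': 1}, 'c': 2}]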
df.to_json(path)
or
df.to_dict()
I just implemented this using two functions:
1. Get a full list of fields from the DataFrame that are part of a nested field, keeping only the parent, i.e. if location.city.code fits the criteria, we only care about location.city. Sort it by the deepest level of nesting, i.e. location.city is nested deeper than location.
2. Starting with the most deeply nested parent field, find all child fields by searching in the column names. Create a field in the DataFrame for the parent field, which is a combination of all child fields (renamed so that they lose the nesting structure, e.g. location.city.code becomes code) converted to JSON and then loaded into a dictionary value. Finally, drop all of the child fields.
import json
from typing import List

import pandas as pd

def _get_nested_fields(df: pd.DataFrame) -> List[str]:
    """Return a list of nested fields, sorted by the deepest level of nesting first."""
    nested_fields = [*{field.rsplit(".", 1)[0] for field in df.columns if "." in field}]
    nested_fields.sort(key=lambda record: len(record.split(".")), reverse=True)
    return nested_fields

def df_denormalize(df: pd.DataFrame) -> pd.DataFrame:
    """
    Convert a normalised DataFrame into a nested structure.
    Fields separated by '.' are considered part of a nested structure.
    """
    nested_fields = _get_nested_fields(df)
    for field in nested_fields:
        list_of_children = [column for column in df.columns if field in column]
        rename = {
            field_name: field_name.rsplit(".", 1)[1] for field_name in list_of_children
        }
        renamed_fields = df[list_of_children].rename(columns=rename)
        df[field] = json.loads(renamed_fields.to_json(orient="records"))
        df.drop(list_of_children, axis=1, inplace=True)
    return df
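A hypothetical round trip with these helpers (two levels of nesting; again, values may come back as numpy scalars):
import pandas as pd

df = pd.json_normalize([{"id": 1, "location": {"city": "NYC", "code": 10}}])
print(df_denormalize(df).to_dict(orient="records"))
# [{'id': 1, 'location': {'city': 'NYC', 'code': 10}}]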
Let me throw in my two cents: after the backward conversion, you might need to drop the empty (NaN) columns from your generated JSONs.
Therefore, I wanted to check if val != np.nan, but you can't do that directly; instead, you need to check whether val == val, because np.nan is not equal to itself.
My version:
def to_formatted_json(df, sep="."):
    result = []
    for _, row in df.iterrows():
        parsed_row = {}
        for idx, val in row.items():
            if val == val:  # False only for NaN
                keys = idx.split(sep)
                parsed_row = set_for_keys(parsed_row, keys, val)
        result.append(parsed_row)
    return result
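Equivalently, pandas exposes this NaN check directly; here is the same function sketched with pd.notna instead of the val == val trick (assumes set_for_keys from the earlier answer is in scope):
import pandas as pd

def to_formatted_json(df, sep="."):
    result = []
    for _, row in df.iterrows():
        parsed_row = {}
        for idx, val in row.items():
            if pd.notna(val):  # skip NaN cells, same effect as val == val
                keys = idx.split(sep)
                parsed_row = set_for_keys(parsed_row, keys, val)
        result.append(parsed_row)
    return result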
This is a solution which seems to work for me. It is designed to work on a dataframe with a single row, but it can easily be looped over large dataframes.
import numpy as np

class JsonRecreate():
    def __init__(self, df):
        self.df = df

    def pandas_to_json(self):
        df = self.df
        # determine the number of nesting levels
        number_levels = np.max([len(i.split('.')) for i in df.columns])
        # put all the nesting levels in a list
        levels = []
        for level_idx in np.arange(number_levels):
            levels.append(np.array([i.split('.')[level_idx] if len(i.split('.')) > level_idx else ''
                                    for i in df.columns.tolist()]))
        self.levels = levels
        return self.create_dict(upper_bound=self.levels[0].shape[0])

    def create_dict(self, level_idx=0, lower_bound=0, upper_bound=100):
        ''' Create the dictionary starting from a pandas dataframe generated by json_normalize '''
        levels = self.levels
        dict_ = {}
        # current nesting level
        level = levels[level_idx]
        # loop over all the relevant elements of the level (relevant w.r.t. its parent)
        for key in [i for i in np.unique(level[lower_bound: upper_bound]) if i != '']:
            # find where a particular key occurs in the level
            correspondence = np.where(level[lower_bound: upper_bound] == key)[0] + lower_bound
            # check if the value(s) corresponding to the key appears once (or multiple times)
            if correspondence.shape[0] == 1:
                # if the occurrence is unique, append the value to the dictionary
                dict_[key] = self.df.values[0][correspondence[0]]
            else:
                # otherwise, redefine the relevant bounds and call the function recursively
                lower_bound_, upper_bound_ = correspondence.min(), correspondence.max() + 1
                dict_[key] = self.create_dict(level_idx + 1, lower_bound_, upper_bound_)
        return dict_
I tested it with a simple dataframe such as:
df = pd.DataFrame({'a.b': [1], 'a.c.d': [2], 'a.c.e': [3], 'a.z.h1': [-1], 'a.z.h2': [-2], 'f': [4], 'g.h': [5], 'g.i.l': [6], 'g.i.m': [7], 'g.z.h1': [-3], 'g.z.h2': [-4]})
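Calling it on that dataframe gives a nested dict along these lines (keys come back sorted within each level because of np.unique):
result = JsonRecreate(df).pandas_to_json()
# {'a': {'b': 1, 'c': {'d': 2, 'e': 3}, 'z': {'h1': -1, 'h2': -2}},
#  'f': 4, 'g': {'h': 5, 'i': {'l': 6, 'm': 7}, 'z': {'h1': -3, 'h2': -4}}}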
The key order is not exactly preserved in the resulting JSON, but that can easily be handled if needed.
We are interested in using a surrogate model in an aircraft design process implemented in OpenMDAO. Basically, we want to use an aerodynamic code (such as VSPAERO, in our case) to produce a database (using a DOE) and then build a surrogate that will be used in the design process. It looks like your proposal 2) in "use of MOE in OpenMDAO", and we also want access to the "gradient" information of the surrogate so it can be used in the full design problem.
We started from the code you provided in the nested problem question and tried to build a mock-up case with a simplified aerodynamic component. The example code is below (using kriging), and we have two concerns to finish it:
we need to implement a "linearize" function in our component if we want to use the surrogate's gradient information: I guess we should use the "calc_gradient" function of the problem to do this. Is that right?
in our example code, the training will be done each time we call the component, which is not very efficient: is there a way to call it only once, or to do the surrogate training only after the setup() of the bigger problem (aircraft design, in our case)?
Here is the code (sorry it is a bit long):
from openmdao.api import IndepVarComp, Group, Problem, ScipyOptimizer, ExecComp, DumpRecorder, Component, NLGaussSeidel, ScipyGMRES, Newton, SqliteRecorder, MetaModel, \
    KrigingSurrogate, FloatKrigingSurrogate
from openmdao.drivers.latinhypercube_driver import LatinHypercubeDriver, OptimizedLatinHypercubeDriver
from openmdao.solvers.solver_base import NonLinearSolver
import numpy as np
import sys

alpha_test = np.array([0.56, 0.24, 0.30, 0.32, 0.20])
eta_test = np.array([-0.30, -0.14, -0.19, -0.18, -0.12])
num_elem = len(alpha_test)

class SysAeroSurrogate(Component):
    """ Simulates the presence of an aero surrogate model using a linear
    aerodynamic model, coming from the pyMission code:
    https://github.com/OpenMDAO-Plugins/pyMission/blob/master/src/pyMission/aerodynamics.py """

    def __init__(self, num_elem=1):
        super(SysAeroSurrogate, self).__init__()
        self.add_param('alpha', 0.5)
        self.add_param('eta', -0.33)
        self.add_param('AR', 0.0)
        self.add_param('oswald', 0.0)
        self.add_output('CL', val=0.0)
        self.add_output('CD', val=0.0)  ## Drag coefficient

    def solve_nonlinear(self, params, unknowns, resids):
        """ Compute lift and drag coefficients using angle of attack and tail
        rotation angles. Linear aerodynamics is assumed. """
        alpha = params['alpha']
        eta = params['eta']
        aspect_ratio = params['AR']
        oswald = params['oswald']
        lift_c0 = 0.30
        lift_ca = 6.00
        lift_ce = 0.27
        drag_c0 = 0.015
        unknowns['CL'] = lift_c0 + lift_ca*alpha*1e-1 + lift_ce*eta*1e-1
        unknowns['CD'] = (drag_c0 + (unknowns['CL'])**2 / (np.pi * aspect_ratio * oswald)) / 1e-1

class SuroMM(Group):
    def __init__(self):
        super(SuroMM, self).__init__()
        # kriging
        AeroMM = self.add("AeroMM", MetaModel())
        AeroMM.add_param('alpha', val=0.)
        AeroMM.add_param('eta', val=0.)
        AeroMM.add_output('CL_MM', val=0., surrogate=FloatKrigingSurrogate())
        AeroMM.add_output('CD_MM', val=0., surrogate=FloatKrigingSurrogate())

class SurrogateAero(Component):
    def __init__(self):
        super(SurrogateAero, self).__init__()
        ## Inputs to this subprob
        self.add_param('alpha', val=0.5*np.ones(num_elem))  ## Angle of attack
        self.add_param('eta', val=0.5*np.ones(num_elem))  ## Tail rotation angle
        self.add_param('AR', 0.0)
        self.add_param('oswald', 0.0)
        ## Unknowns for this sub prob
        self.add_output('CD', val=np.zeros(num_elem))
        self.add_output('CL', val=np.zeros(num_elem))
        #####
        self.problem = prob = Problem()
        prob.root = Group()
        prob.root.add('d1', SuroMM(), promotes=['*'])
        prob.setup()
        #### training of metamodel
        prob['AeroMM.train:alpha'] = DOEX1
        prob['AeroMM.train:eta'] = DOEX2
        prob['AeroMM.train:CL_MM'] = DOEY1
        prob['AeroMM.train:CD_MM'] = DOEY2

    def solve_nonlinear(self, params, unknowns, resids):
        CL_temp = np.zeros(num_elem)
        CD_temp = np.zeros(num_elem)
        prob = self.problem
        # Pass values into our problem
        for i in range(len(params['alpha'])):
            prob['AeroMM.alpha'] = params['alpha'][i]
            prob['AeroMM.eta'] = params['eta'][i]
            # Run problem
            prob.run()
            CL_temp[i] = prob['AeroMM.CL_MM']
            CD_temp[i] = prob['AeroMM.CD_MM']
        # Pull values from problem
        unknowns['CL'] = CL_temp
        unknowns['CD'] = CD_temp

if __name__ == "__main__":
    ###### creation of database with DOE #####
    top = Problem()
    root = top.root = Group()
    root.add('comp', SysAeroSurrogate(), promotes=['*'])
    root.add('p1', IndepVarComp('alpha', val=0.50), promotes=['*'])
    root.add('p2', IndepVarComp('eta', val=0.50), promotes=['*'])
    root.add('p3', IndepVarComp('AR', 10.), promotes=['*'])
    root.add('p4', IndepVarComp('oswald', 0.92), promotes=['*'])
    top.driver = OptimizedLatinHypercubeDriver(num_samples=16, seed=0, population=20, generations=4, norm_method=2)
    top.driver.add_desvar('alpha', lower=-5.0*(np.pi/180.0)*1e-1, upper=15.0*(np.pi/180.0)*1e-1)
    top.driver.add_desvar('eta', lower=-5.0*(np.pi/180.0)*1e-1, upper=15.0*(np.pi/180.0)*1e-1)
    top.driver.add_objective('CD')

    recorder = SqliteRecorder('Aero')
    recorder.options['record_params'] = True
    recorder.options['record_unknowns'] = True
    recorder.options['record_resids'] = False
    recorder.options['record_metadata'] = False
    top.driver.add_recorder(recorder)

    top.setup()
    top.run()

    import sqlitedict
    db = sqlitedict.SqliteDict('Aero', 'openmdao')
    print(list(db.keys()))

    DOEX1 = []
    DOEX2 = []
    DOEY1 = []
    DOEY2 = []
    for i in list(db.keys()):
        data = db[i]
        p = data['Parameters']
        DOEX1.append(p['comp.alpha'])
        DOEX2.append(p['comp.eta'])
        p = data['Unknowns']
        DOEY1.append(p['CL'])
        DOEY2.append(p['CD'])

    ################ use of surrogate model ######
    prob2 = Problem(root=Group())
    prob2.root.add('SurrAero', SurrogateAero(), promotes=['*'])
    prob2.root.add('v1', IndepVarComp('alpha', val=alpha_test), promotes=['*'])
    prob2.root.add('v2', IndepVarComp('eta', val=eta_test), promotes=['*'])
    prob2.setup()
    prob2.run()
    print('CL predicted:', prob2['CL'])
    print('CD predicted:', prob2['CD'])
The way you have your model set up seems correct. The MetaModel component will only train its data one time (on the first pass through the model), as you can see in this part of the source code. On every subsequent iteration, it just uses the trained surrogate that's already there.
The meta-model is also already set up to provide analytic derivatives of the predicted output with respect to the input independent variables. Derivatives of the prediction with respect to the training point values are not available in the base implementation. That would require a more complex setup which, at least for the moment, needs some custom work that is not in the standard library.
EDIT -- took the code from below and made it so it can handle ForeignKeys and Decimal numbers (although I'm doing a very forced float conversion). It returns a dict now so it can be recursive.
from sqlobject import SQLObject
from decimal import Decimal

def sqlobject_to_dict(obj):
    json_dict = {}
    cls_name = type(obj)
    for attr in vars(cls_name):
        if isinstance(getattr(cls_name, attr), property):
            attr_value = getattr(obj, attr)
            attr_class = type(attr_value)
            attr_parent = attr_class.__bases__[0]
            if isinstance(getattr(obj, attr), Decimal):
                json_dict[attr] = float(getattr(obj, attr))
            elif attr_parent == SQLObject:
                json_dict[attr] = sqlobject_to_dict(getattr(obj, attr))
            else:
                json_dict[attr] = getattr(obj, attr)
    return json_dict
EDIT -- changed to add the actual data model -- there are generated values that need to be accessed and Decimal() columns that need dealing with as well.
So I've seen this: return SQL table as JSON in python, but it's not really what I'm looking for -- that's "brute force": you need to know the names of the attributes of the object in order to generate the JSON response.
What I'd like to do is something like this (the name of the class and its attributes are unimportant):
class BJCPStyle(SQLObject):
    name = UnicodeCol(length=128, default=None)
    beer_type = UnicodeCol(length=5, default=None)
    category = ForeignKey('BJCPCategory')
    subcategory = UnicodeCol(length=1, default=None)
    aroma = UnicodeCol(default=None)
    appearance = UnicodeCol(default=None)
    flavor = UnicodeCol(default=None)
    mouthfeel = UnicodeCol(default=None)
    impression = UnicodeCol(default=None)
    comments = UnicodeCol(default=None)
    examples = UnicodeCol(default=None)
    og_low = SGCol(default=None)
    og_high = SGCol(default=None)
    fg_low = SGCol(default=None)
    fg_high = SGCol(default=None)
    ibu_low = IBUCol(default=None)
    ibu_high = IBUCol(default=None)
    srm_low = SRMCol(default=None)
    srm_high = SRMCol(default=None)
    abv_low = DecimalCol(size=3, precision=1, default=None)
    abv_high = DecimalCol(size=3, precision=1, default=None)
    versions = Versioning()

    def _get_combined_category_id(self):
        return "%s%s" % (self.category.category_id, self.subcategory)

    def _get_og_range(self):
        low = self._SO_get_og_low()
        high = self._SO_get_og_high()
        if low == 0 and high == 0:
            return "varies"
        else:
            return "%.3f - %.3f" % (low, high)

    def _get_fg_range(self):
        low = self._SO_get_fg_low()
        high = self._SO_get_fg_high()
        if low == 0 and high == 0:
            return "varies"
        else:
            return "%.3f - %.3f" % (low, high)

    def _get_srm_range(self):
        low = self._SO_get_srm_low()
        high = self._SO_get_srm_high()
        if low == 0 and high == 0:
            return "varies"
        else:
            return "%.1f - %.1f" % (low, high)

    def _get_abv_range(self):
        low = self._SO_get_abv_low()
        high = self._SO_get_abv_high()
        if low == 0 and high == 0:
            return "varies"
        else:
            return "%.2f%% - %.2f%%" % (low, high)

    def _get_ibu_range(self):
        low = self._SO_get_ibu_low()
        high = self._SO_get_ibu_high()
        if low == 0 and high == 0:
            return "varies"
        else:
            return "%i - %i" % (low, high)
Is there an easy, pythonic way to write that magic to_json() function?
You can use the Python json module with the SQLObject sqlmeta class, like this:
def to_json(obj):
    return json.dumps(dict((c, getattr(obj, c)) for c in obj.sqlmeta.columns))
When I run this with your class Foo I get:
>>> print to_json(f)
{"bar": "test", "lulz": "only for the", "baz": true}
Edit: if you want to include magic attributes in your JSON string and you don't mind using something of a hack, you can abuse the fact that the attributes of your object are Python properties. For example, if I add a magic attribute foo to your original sample class:
class Foo(SQLObject):
    bar = UnicodeCol(length=128)
    baz = BoolCol(default=True)
    lulz = UnicodeCol(length=256)

    def _get_foo(self):
        return "foo"
Then I can define the to_json() function like this:
def to_json(obj):
    cls = type(obj)
    d = dict((c, getattr(obj, c)) for c in vars(cls) if isinstance(getattr(cls, c), property))
    return json.dumps(d)
Now, if I do this:
f = Foo(bar = "test", lulz = "only for the")
print to_json(f)
I get the following result:
{"baz": true, "lulz": "only for the", "bar": "test", "foo": "foo"}
import json
json.dumps(obj_instance.sqlmeta.asDict())
In my case this object contained datetimes, which json doesn't serialize, so I did something like this:
json.dumps(dict((k, str(v)) for (k,v) in obj_instance.sqlmeta.asDict().items()))
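Alternatively, json.dumps accepts a default= fallback that is called for anything it can't serialize natively; default=str covers datetimes in one go:
import json

json.dumps(obj_instance.sqlmeta.asDict(), default=str)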
Something like this ...
class MyTable(sqlobject.SQLObject):
    # ... your columns ...

json.dumps({
    'MyTable': [row.sqlmeta.asDict() for row in MyTable.select()]
}, indent=4, sort_keys=True)
Suppose you have a list of sqlobject.SQLObject-derived classes called Tables:
Tables = [MyTable, ...]

def dump():
    r = {}
    for t in Tables:
        r[t.__name__] = [row.sqlmeta.asDict() for row in t.select()]
    return json.dumps(r, indent=4, sort_keys=True)