Python Mock Patch Two Functions that are similar - sqlalchemy

Let's say I have a function that makes two similar function calls:

def foo():
    test = one_func("SELECT * from Users;")
    test1 = one_func("SELECT * from Addresses;")
    return test, test1

How do I patch each of these calls separately? Here's my attempt:

@patch('one_func')
def test_foo(self, mock_one_func):
    mock_one_func.return_value = one_func("SELECT * from TestUsers;")
    mock_one_func.return_value = one_func("SELECT * from TestAddresses;")

But I think this approach patches one_func as a whole for each assignment, which results in:

def foo():
    test = one_func("SELECT * from TestUsers;")
    test1 = one_func("SELECT * from TestUsers;")
    return test, test1

and then, after the second assignment:

def foo():
    test = one_func("SELECT * from TestAddresses;")
    test1 = one_func("SELECT * from TestAddresses;")
    return test, test1

What I want to happen in the patched function is:

def foo():
    test = one_func("SELECT * from TestUsers;")
    test1 = one_func("SELECT * from TestAddresses;")
    return test, test1

The way to achieve what you need is to use side_effect instead of return_value. side_effect can be many things: if it is an exception class or instance, the patched method will raise that exception whenever it is called; if it is a list of values, the patched method will return each value in sequence, one per call; if it is a function, the patched method will call that function with the arguments of each call and return its result.
Here is a working example, showing both a list of values and a function as side_effect. What's nice about using a function is that it can return specific values depending on the arguments of the patched call.
from mock import patch
import unittest

class MyClass(object):
    def one_func(self, query):
        return ''

    def foo(self):
        test = self.one_func("SELECT * from Users;")
        test1 = self.one_func("SELECT * from Addresses;")
        return test, test1

class Test(unittest.TestCase):
    @patch.object(MyClass, 'one_func')
    def test_foo(self, one_func_mock):
        # side_effect can be a list of responses that will be returned in
        # subsequent calls
        one_func_mock.side_effect = ['users', 'addresses']
        self.assertEqual(('users', 'addresses'), MyClass().foo())

        # side_effect can also be a function which will return different mock
        # responses depending on args:
        def side_effect(args):
            if args == "SELECT * from Users;":
                return 'other users'
            if args == "SELECT * from Addresses;":
                return 'other addresses'
        one_func_mock.side_effect = side_effect
        self.assertEqual(('other users', 'other addresses'), MyClass().foo())

unittest.main()
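For completeness, side_effect also covers the exception case mentioned above: assigning an exception class or instance makes every call to the mock raise it. A minimal sketch reusing MyClass from the example (the RuntimeError here is just an illustrative stand-in for a real database error):

from mock import patch

with patch.object(MyClass, 'one_func') as one_func_mock:
    # Any call to the mocked method now raises instead of returning a value.
    one_func_mock.side_effect = RuntimeError("database is down")
    try:
        MyClass().foo()
    except RuntimeError as exc:
        print(exc)  # -> database is down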

Related

Problem with PettingZoo and Stable-Baselines3 with a ParallelEnv

I am having trouble making things work with a custom ParallelEnv I wrote using PettingZoo. I am using SuperSuit's ss.pettingzoo_env_to_vec_env_v1(env) as a wrapper to vectorize the environment and make it work with Stable-Baselines3, as documented here.
Attached is a summary of the most relevant parts of the code:
from typing import Optional
from gym import spaces
import random
import numpy as np
from pettingzoo import ParallelEnv
from pettingzoo.utils.conversions import parallel_wrapper_fn
import supersuit as ss
from gym.utils import EzPickle, seeding

def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    # env_ = ss.concat_vec_envs_v1(env_, 1)
    return env_

petting_zoo = env

class parallel_env(ParallelEnv, EzPickle):
    metadata = {'render_modes': ['ansi'], "name": "PlayerEnv-Multi-v0"}

    def __init__(self, n_agents: int = 20, new_step_api: bool = True) -> None:
        EzPickle.__init__(
            self,
            n_agents,
            new_step_api
        )
        self._episode_ended = False
        self.n_agents = n_agents
        self.possible_agents = [
            f"player_{idx}" for idx in range(n_agents)]
        self.agents = self.possible_agents[:]
        self.agent_name_mapping = dict(
            zip(self.possible_agents, list(range(len(self.possible_agents))))
        )
        self.observation_spaces = spaces.Dict(
            {agent: spaces.Box(shape=(len(self.agents),),
                               dtype=np.float64, low=0.0, high=1.0) for agent in self.possible_agents}
        )
        self.action_spaces = spaces.Dict(
            {agent: spaces.Discrete(4) for agent in self.possible_agents}
        )
        self.current_step = 0

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)

    def observation_space(self, agent):
        return self.observation_spaces[agent]

    def action_space(self, agent):
        return self.action_spaces[agent]

    def __calculate_observation(self, agent_id: int) -> np.ndarray:
        return self.observation_space(agent_id).sample()

    def __calculate_observations(self) -> np.ndarray:
        observations = {
            agent: self.__calculate_observation(
                agent_id=agent)
            for agent in self.agents
        }
        return observations

    def observe(self, agent):
        return self.__calculate_observation(agent_id=agent)

    def step(self, actions):
        if self._episode_ended:
            return self.reset()
        observations = self.__calculate_observations()
        rewards = random.sample(range(100), self.n_agents)
        self.current_step += 1
        self._episode_ended = self.current_step >= 100
        infos = {agent: {} for agent in self.agents}
        dones = {agent: self._episode_ended for agent in self.agents}
        rewards = {
            self.agents[i]: rewards[i]
            for i in range(len(self.agents))
        }
        if self._episode_ended:
            self.agents = {}  # To satisfy `set(par_env.agents) == live_agents`
        return observations, rewards, dones, infos

    def reset(self,
              seed: Optional[int] = None,
              return_info: bool = False,
              options: Optional[dict] = None):
        self.agents = self.possible_agents[:]
        self._episode_ended = False
        self.current_step = 0
        observations = self.__calculate_observations()
        return observations

    def render(self, mode="human"):
        # TODO: IMPLEMENT
        print("TO BE IMPLEMENTED")

    def close(self):
        pass
Unfortunately when I try to test with the following main procedure:
from stable_baselines3 import DQN, PPO
from stable_baselines3.common.env_checker import check_env
from dummy_env import dummy
from pettingzoo.test import parallel_api_test

if __name__ == '__main__':
    # Testing the parallel algorithm alone
    env_parallel = dummy.parallel_env()
    parallel_api_test(env_parallel)  # This works!

    # Testing the environment with the wrapper
    env = dummy.petting_zoo()

    # ERROR: AssertionError: The observation returned by the `reset()` method does not match the given observation space
    check_env(env)

    # Model initialization
    model = PPO("MlpPolicy", env, verbose=1)

    # ERROR: ValueError: could not broadcast input array from shape (20,20) into shape (20,)
    model.learn(total_timesteps=10_000)
I get the following error:
AssertionError: The observation returned by the `reset()` method does not match the given observation space
If I skip check_env() I get the following one:
ValueError: could not broadcast input array from shape (20,20) into shape (20,)
It seems that ss.pettingzoo_env_to_vec_env_v1(env) is capable of splitting the parallel environment into multiple vectorized ones, but not for the reset() function.
Does anyone know how to fix this problem?
Please find the GitHub repository to reproduce the problem.
You should double-check the reset() function in PettingZoo: it returns None instead of an observation, unlike Gym.
Thanks to a discussion I had in the issue section of the SuperSuit repository, I am able to post the solution to the problem. Thanks to jjshoots!
First of all, it is necessary to have the latest SuperSuit version. In order to get that, I needed to install Stable-Baselines3 using the instructions here to make it work with gym 0.24+.
After that, taking the code in the question as an example, it is necessary to substitute

def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    # env_ = ss.concat_vec_envs_v1(env_, 1)
    return env_

with

def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    env_ = ss.concat_vec_envs_v1(env_, 1, base_class="stable_baselines3")
    return env_
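With the new wrapper in place, the main procedure from the question boils down to the following sketch (it simply mirrors the question's own code; dummy_env.dummy is the module layout used there):

from stable_baselines3 import PPO
from dummy_env import dummy

if __name__ == '__main__':
    env = dummy.petting_zoo()  # wrapper now ends with concat_vec_envs_v1(..., base_class="stable_baselines3")
    model = PPO("MlpPolicy", env, verbose=1)
    model.learn(total_timesteps=10_000)  # trains successfully (Outcome 2 below)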
The outcomes are:
Outcome 1: leaving the check_env(env) line in place, I got the error AssertionError: Your environment must inherit from the gym.Env class (cf. https://github.com/openai/gym/blob/master/gym/core.py).
Outcome 2: removing the check_env(env) line, the agent starts training successfully!
In the end, I think the argument base_class="stable_baselines3" made the difference.
Only the small check_env problem remains to be reported, but I think it can be considered trivial as long as the training works.

What is the SQLAlchemy (ORM) command to return an object (which represents a row in the database) based on a query filter?

When I issue the command sess.query(TestClass).all()
SQLAlchemy returns the two objects I instantiated (as expected).
Code:
query_result_1 = sess.query(TestClass).all()
print(query_result_1)
output:
[<__main__.TestClass object at 0x10e9df7c0>, <__main__.TestClass object at 0x10e9df9a0>]
When I issue the command that I expected to return an object based on a filter, I instead got SQL commands...
code:
query_result_2 = sess.query(TestClass).filter(TestClass.name=='name_1')
output:
SELECT test_table.id AS test_table_id, test_table.name AS test_table_name
FROM test_table
WHERE test_table.name = ?
Here is the entire script:
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import Session
from sqlalchemy.orm import Query

print('\nsqlalchemy version: ' + sqlalchemy.__version__)

Base = declarative_base()

class TestClass(Base):
    __tablename__ = 'test_table'
    id = Column(Integer, primary_key=True)
    name = Column(String(20))

    def __init__(self, name):
        self.name = name

engine = create_engine('sqlite:///', echo=False)
Base.metadata.create_all(engine)
sess = Session(engine)

obj_1 = TestClass('name_1')
sess.add(obj_1)
obj_2 = TestClass('name_2')
sess.add(obj_2)

print('\ntest instance creation (before commit):')
print('obj_1.name = %s' % (obj_1.name))
print('obj_2.name = %s' % (obj_2.name))

query_result_1 = sess.query(TestClass).all()
print('\nquery_result_1 (query all before commit):')
print(query_result_1)

# QUESTION: What's wrong with this statement...
# or my expectation of what it should produce?
query_result_2 = sess.query(TestClass).filter(TestClass.name == 'name_1')
print('\nquery_result_2 (query (filter by name) before commit):')
print('I expected this query to return the TestClass object with name = name_1')
print('instead, I got this:')
print(query_result_2)

sess.commit()

query_result_3 = sess.query(TestClass).all()
print('\nquery_result_3 (query all after commit):')
print(query_result_3)

# QUESTION: What's wrong with this statement...
# or my expectation of what it should produce?
query_result_4 = sess.query(TestClass).filter(TestClass.name == 'name_1')
print('\nquery_result_4 (query (filter by name) after commit):')
print('I expected this query to return the TestClass object with name = name_1')
print('instead, I got this:')
print(query_result_4)
Here is the terminal output:
sqlalchemy version: 1.3.17
test instance creation (before commit):
obj_1.name = name_1
obj_2.name = name_2
query_result_1 (query all before commit):
[<__main__.TestClass object at 0x10e9df7c0>, <__main__.TestClass object at 0x10e9df9a0>]
query_result_2 (query (filter by name) before commit):
I expected this query to return the TestClass object with name = name_1
instead, I got this:
SELECT test_table.id AS test_table_id, test_table.name AS test_table_name
FROM test_table
WHERE test_table.name = ?
query_result_3 (query all after commit):
[<__main__.TestClass object at 0x10e9df7c0>, <__main__.TestClass object at 0x10e9df9a0>]
query_result_4 (query (filter by name) after commit):
I expected this query to return the TestClass object with name = name_1
instead, I got this:
SELECT test_table.id AS test_table_id, test_table.name AS test_table_name
FROM test_table
WHERE test_table.name = ?
UPDATE...
I have figured out that SQLAlchemy is, in fact, returning a list of objects (in this case a list containing one object) after I issue the query statement with the filter: query(class).filter(class.attrbt==___).
The good news is that SQLAlchemy is (mostly) behaving the way I expected, and there is nothing wrong with my query statement. But now I have a different question:
Why does the output from SQLAlchemy show SQL commands in response to the query().filter() statement? In response to query().all() it returns a list of objects, which is what I expected query().filter() to do as well.
Your first example returns a list of objects because you invoked .all() to actually execute the query and return the results.
Your second example prints the SQL because you have created the query but you haven't executed it yet. .filter modifies the query but does not execute it. Compare ...
thing = session.query(Account).filter(Account.id == 1)
print(type(thing)) # <class 'sqlalchemy.orm.query.Query'>
print(thing) # SELECT so62234199.id AS so62234199_id, ...
... with ...
thing = session.query(Account).filter(Account.id == 1).one()
print(type(thing)) # <class '__main__.Account'>
print(thing) # <Account(id=1, created='2020-01-01 00:00:00')>
... and ...
thing = session.query(Account).filter(Account.id == 1).all()
print(type(thing)) # <class 'list'>
print(thing) # [<Account(id=1, created='2020-01-01 00:00:00')>]
For more information, see Returning Lists and Scalars in the ORM tutorial.
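Note that a Query object is also iterable, and iterating it is yet another way to execute it; a minimal sketch reusing the Account example above:

# Iterating the Query executes the SELECT and yields mapped objects one by one.
for account in session.query(Account).filter(Account.id == 1):
    print(account)  # <Account(id=1, created='2020-01-01 00:00:00')>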

Django rounds the last 3 digits when displaying a bigint from MySQL

I am using Django to display rows from MySQL.
The table in MySQL has a primary key that is a bigint, and one of the values is 871195445245063168, an 18-digit number.
But on my page I see 871195445245063200 displayed: the last 3 digits are rounded. I am wondering where I went wrong.
1. I define a class with a function named data_query to query MySQL.
class MyQuery:
    # Excerpt: the connection settings (DBHOST, DBUSER, ...) are defined
    # elsewhere, with sensitive info replaced.
    def __init__(self):
        self.conn = MySQLdb.connect(host=self.DBHOST, user=self.DBUSER,
                                    passwd=self.DBPWD, port=self.DBPORT,
                                    charset=self.CHARSET, connect_timeout=3)

    def data_query(self, sql):
        cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
        start = time.time()
        cursor.execute(sql)
        end = time.time()
        sql_time = end - start
        column_description = cursor.description
        column_name = [column[0] for column in column_description]
        res = cursor.fetchall()
        cursor.close()
        self.conn.close()
        return res, column_name, sql_time
2. I defined a JSON encoder as follows:
class CJsonEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            try:
                return obj.strftime('%Y-%m-%d %H:%M:%S')
            except ValueError:
                return str(obj)
        elif isinstance(obj, datetime.date):
            try:
                return obj.strftime('%Y-%m-%d')
            except ValueError:
                return str(obj)
        elif isinstance(obj, datetime.timedelta):
            return str(obj)
        elif isinstance(obj, decimal.Decimal):
            return float(obj)
        elif isinstance(obj, ObjectId):
            return str(obj)
        else:
            return json.JSONEncoder.default(self, obj)
3. I get my display like this, with sensitive info replaced:
db = MyQuery(host, user, pwd, port)
sql_statement = 'select * from mytable where Findex=871195445245063168 limit 10'
sql_result, table_column_name, sql_time = db.data_query(sql_statement)
query_result = {}
column_name = column_format(table_column_name)
query_result['column'] = column_name
query_result['data'] = list(sql_result)
return HttpResponse(json.dumps(query_result, cls=CJsonEncoder), content_type='application/json')
So, where did I go wrong here? Thanks.
This is a JavaScript issue. Your number is bigger than the largest safe integer in JavaScript (Number.MAX_SAFE_INTEGER), so it is rounded.
You can verify this in your browser console or in node.js
$ node
> x = 871195445245063168
871195445245063200
I assume you are either using your response in some kind of JavaScript frontend, or you have a browser extension that renders the JSON, which is written in JavaScript.
If you request that URL with a client like curl, you will see that it is returned correctly from the server.
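If that is the cause, a common workaround is to serialize bigint values as strings so that JavaScript never parses them as numbers. JSONEncoder.default() is not invoked for ints, so the encoder in the question cannot intercept them; a recursive pre-pass before json.dumps() works instead. A minimal sketch (the threshold constant and sample payload are illustrative, not from the question):

import json

MAX_SAFE_INTEGER = 2**53 - 1  # largest integer JavaScript represents exactly

def stringify_big_ints(obj):
    # Recursively convert oversized ints to strings before json.dumps().
    if isinstance(obj, int) and abs(obj) > MAX_SAFE_INTEGER:
        return str(obj)
    if isinstance(obj, dict):
        return {k: stringify_big_ints(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [stringify_big_ints(v) for v in obj]
    return obj

payload = {'data': [{'Findex': 871195445245063168}]}
print(json.dumps(stringify_big_ints(payload)))
# {"data": [{"Findex": "871195445245063168"}]}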

Is there a way to ensure that all my ctypes have argtypes?

I know I should specify argtypes for my C/C++ functions since some of my calls would otherwise result in stack corruption.
myCfunc.argtypes = [ct.c_void_p, ct.POINTER(ct.c_void_p)]
myCfunc.errcheck = my_error_check
In fact, I would like to verify that I did not forget to specify function prototypes (argtypes/errcheck) for any of my roughly 100 function calls...
Right now I just grep through my Python files and visually compare against my file containing the prototype definitions.
Is there a better way to verify that I have defined argtypes/errcheck for all my calls?
The mention of namespaces by @eryksun made me wrap the dll in a class that only exposes the explicitly annotated functions. As long as the dll doesn't have the function names "annotate" or "_error_check" (which mine didn't), the following approach seems to work for me:
import ctypes as ct

class MyWinDll:
    def __init__(self, dll_filename):
        self._dll = ct.WinDLL(dll_filename)
        # Specify function prototypes using the annotate function
        self.annotate(self._dll.myCfunc, [ct.POINTER(ct.c_void_p)], self._error_check)
        self.annotate(self._dll.myCfunc2, [ct.c_void_p], self._error_check)
        ...

    def annotate(self, function, argtypes, errcheck):
        # note that "annotate" may not be used as a function name in the dll...
        function.argtypes = argtypes
        function.errcheck = errcheck
        setattr(self, function.__name__, function)

    def _error_check(self, result, func, arguments):
        if result != 0:
            raise Exception

if __name__ == '__main__':
    dll = MyWinDll('myWinDll.dll')
    handle = ct.c_void_p(None)
    # Now call the dll functions using the wrapper object
    dll.myCfunc(ct.byref(handle))
    dll.myCfunc2(handle)
Update: Comments by @eryksun made me try to improve the code by giving the user control of the WinDLL constructor and attempting to reduce repeated code:
import ctypes as ct

DEFAULT = object()

def annotate(dll_object, function_name, argtypes, restype=DEFAULT, errcheck=DEFAULT):
    function = getattr(dll_object._dll, function_name)
    function.argtypes = argtypes
    # restype and errcheck are optional in the function_prototypes list
    if restype is DEFAULT:
        restype = dll_object.default_restype
    function.restype = restype
    if errcheck is DEFAULT:
        errcheck = dll_object.default_errcheck
    function.errcheck = errcheck
    setattr(dll_object, function_name, function)

class MyDll:
    def __init__(self, ct_dll, **function_prototypes):
        self._dll = ct_dll
        for name, prototype in function_prototypes.items():
            annotate(self, name, *prototype)

class OneDll(MyDll):
    def __init__(self, ct_dll):
        # set default values for function_prototypes
        self.default_restype = ct.c_int
        self.default_errcheck = self._error_check
        function_prototypes = {
            'myCfunc': [[ct.POINTER(ct.c_void_p)]],
            'myCfunc2': [[ct.c_void_p]],
            # ...
            'myCgetErrTxt': [[ct.c_int, ct.c_char_p, ct.c_size_t], DEFAULT, None]
        }
        super().__init__(ct_dll, **function_prototypes)

    # My error check function actually calls the dll, so I keep it here...
    def _error_check(self, result, func, arguments):
        msg = ct.create_string_buffer(255)
        if result != 0:
            raise Exception(self.myCgetErrTxt(result, msg, ct.sizeof(msg)))

if __name__ == '__main__':
    ct_dll = ct.WinDLL('myWinDll.dll')
    dll = OneDll(ct_dll)
    handle = ct.c_void_p(None)
    dll.myCfunc(ct.byref(handle))
    dll.myCfunc2(handle)
(I don't know whether the original code should be deleted; I kept it for reference.)
Here's a dummy class that replaces the DLL object's function calls with a simple check that the attributes have been defined:
class DummyFuncPtr(object):
    restype = False
    argtypes = False
    errcheck = False

    def __call__(self, *args, **kwargs):
        assert self.restype
        assert self.argtypes
        assert self.errcheck

    def __init__(self, *args):
        pass

    def __setattr__(self, key, value):
        super(DummyFuncPtr, self).__setattr__(key, True)
To use it, replace your DLL object's _FuncPtr class and then call each function to run the check, e.g.:
dll = ctypes.cdll.LoadLibrary(r'path/to/dll')
# replace the DLL's function pointer
# comment out this line to disable the dummy class
dll._FuncPtr = DummyFuncPtr
some_func = dll.someFunc
some_func.restype = None
some_func.argtypes = None
some_func.errcheck = None
another_func = dll.anotherFunc
another_func.restype = None
another_func.argtypes = None
some_func() # no error
another_func() # Assertion error due to errcheck not defined
The dummy class completely prevents the function from ever being called, of course, so just comment out the replacement line to switch back to normal operation.
Note that it will only check each function when that function is called, so this check is best placed in a unit test file where every function is guaranteed to be called.
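If you would rather fail fast than wait for each function to be called, a hedged alternative is a one-shot scan over a wrapper object like MyWinDll above; note that relying on ctypes._CFuncPtr (the private base class of ctypes function pointers) is an assumption about ctypes internals:

import ctypes as ct

def assert_prototypes_complete(wrapper):
    # Walk the wrapper instance's attributes and flag any exported function
    # whose prototype was never annotated (argtypes/errcheck still unset).
    missing = []
    for name, attr in vars(wrapper).items():
        if isinstance(attr, ct._CFuncPtr):
            if getattr(attr, 'argtypes', None) is None:
                missing.append(name + '.argtypes')
            if getattr(attr, 'errcheck', None) is None:
                missing.append(name + '.errcheck')
    assert not missing, 'Unannotated prototypes: ' + ', '.join(missing)

# Usage, e.g. in a unit test:
#   dll = MyWinDll('myWinDll.dll')
#   assert_prototypes_complete(dll)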

How to count sqlalchemy queries in unit tests

In Django I often assert the number of queries that should be made, so that unit tests catch new N+1 query problems:

from django import db
from django.conf import settings
settings.DEBUG = True

class SendData(TestCase):
    def test_send(self):
        db.connection.queries = []
        event = Events.objects.all()[1:]
        s = str(event)  # QuerySet is lazy, force retrieval
        self.assertEquals(len(db.connection.queries), 2)
In SQLAlchemy, tracing to STDOUT is enabled by setting the echo flag on the engine:

engine.echo = True
What is the best way to write tests that count the number of queries made by SQLAlchemy?
class SendData(TestCase):
    def test_send(self):
        event = session.query(Events).first()
        s = str(event)
        self.assertEquals( ... , 2)
I've created a context manager class for this purpose:
import sqlalchemy

class DBStatementCounter(object):
    """
    Use as a context manager to count the number of execute()'s performed
    against the given sqlalchemy connection.

    Usage:
        with DBStatementCounter(conn) as ctr:
            conn.execute("SELECT 1")
            conn.execute("SELECT 1")
        assert ctr.get_count() == 2
    """
    def __init__(self, conn):
        self.conn = conn
        self.count = 0
        # Will have to rely on this flag since sqlalchemy 0.8 does not support
        # removing event listeners
        self.do_count = False
        sqlalchemy.event.listen(conn, 'after_execute', self.callback)

    def __enter__(self):
        self.do_count = True
        return self

    def __exit__(self, *_):
        self.do_count = False

    def get_count(self):
        return self.count

    def callback(self, *_):
        if self.do_count:
            self.count += 1
Use SQLAlchemy Core Events to log/track queries executed (you can attach the listener from your unit tests so it doesn't impact performance in the actual application):
event.listen(engine, "before_cursor_execute", catch_queries)
Now you write the catch_queries function; how you do it depends on how you test. For example, you could define the function inside your test method:

def test_something(self):
    stmts = []
    def catch_queries(conn, cursor, statement, ...):
        stmts.append(statement)
    # Now attach it as a listener and work with the collected events after running your test
The above method is just an inspiration. For extended cases you'd probably like to have a global cache of events that you empty after each test. The reason is that prior to 0.9 (the current development version at the time of writing) there is no API to remove event listeners. Thus, make one global listener that accesses a global list, as sketched below.
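A minimal sketch of that global-listener pattern (self-contained with an in-memory SQLite engine; engine.execute is the pre-2.0 API, matching the era of this answer):

from sqlalchemy import create_engine, event

engine = create_engine('sqlite://')

statements = []  # global cache of executed statements

def catch_queries(conn, cursor, statement, parameters, context, executemany):
    # Signature of the documented before_cursor_execute event.
    statements.append(statement)

event.listen(engine, "before_cursor_execute", catch_queries)

# In each test: empty the cache, run the code under test, assert the count.
del statements[:]
engine.execute("SELECT 1")
engine.execute("SELECT 1")
assert len(statements) == 2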
What about the approach of using flask_sqlalchemy.get_debug_queries()? By the way, this is the methodology used internally by the Flask Debug Toolbar; check its source:
from flask_sqlalchemy import get_debug_queries

def test_list_with_assuring_queries_count(app, client):
    with app.app_context():
        # here generating some test data
        for _ in range(10):
            notebook = create_test_scheduled_notebook_based_on_notebook_file(
                db.session, owner='testing_user',
                schedule={"kind": SCHEDULE_FREQUENCY_DAILY}
            )
            for _ in range(100):
                create_test_scheduled_notebook_run(db.session, notebook_id=notebook.id)

    with app.app_context():
        # after resetting the context, call the actual view whose query count we want to assert
        client.get(url_for('notebooks.personal_notebooks'))
        assert len(get_debug_queries()) == 3
Keep in mind that to have the context reset and the count correct, you have to enter with app.app_context() right before the exact stuff you want to measure.
A slightly modified version of @omar-tarabai's solution that removes the event listener when exiting the context:
from sqlalchemy import event

class QueryCounter(object):
    """Context manager to count SQLAlchemy queries."""

    def __init__(self, connection):
        self.connection = connection.engine
        self.count = 0

    def __enter__(self):
        event.listen(self.connection, "before_cursor_execute", self.callback)
        return self

    def __exit__(self, *args, **kwargs):
        event.remove(self.connection, "before_cursor_execute", self.callback)

    def callback(self, *args, **kwargs):
        self.count += 1
Usage:
with QueryCounter(session.connection()) as counter:
    session.query(XXX).all()
    session.query(YYY).all()
print(counter.count)  # 2