I am using the code below, as suggested in https://docs.ray.io/en/master/serve/getting_started.html, for my 2 GPUs.
from starlette.requests import Request
import ray
from ray import serve
from transformers import pipeline
from parallel import *
@serve.deployment(num_replicas=2, ray_actor_options={"num_cpus": 0, "num_gpus": 1})
class Translator:
    def __init__(self):
        self.model = get_model()  # pipeline("translation_en_to_fr", model="t5-small")

    def translate(self, count: int) -> int:
        model_output = predict(self.model, count)  # self.model(text)
        return 'translation'

    async def __call__(self, http_request: Request) -> str:
        count: str = await http_request.json()
        return self.translate(count)

translator = Translator.bind()
I have another file which loads the model and runs the prediction.
This is how the model is loaded:
def get_model():
    model = LayoutLMv2ForQuestionAnswering.from_pretrained(model_checkpoint_finetuned)
    print('model loaded in device')
    return model
I don't see any GPUs being used while predicting; it just uses the CPU.
Can anyone help here?
I believe you need to make sure the model is placed on the device (i.e., via model.to("cuda")).
https://huggingface.co/docs/transformers/perf_train_gpu_one
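As a minimal sketch (an assumption on my part, not tested against your setup), get_model could move the model onto the GPU right after loading, since the deployment already reserves one via num_gpus=1:

import torch
from transformers import LayoutLMv2ForQuestionAnswering

def get_model():
    # model_checkpoint_finetuned is the checkpoint path already used in the question
    model = LayoutLMv2ForQuestionAnswering.from_pretrained(model_checkpoint_finetuned)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)  # place the weights on the GPU visible to this replica
    print(f"model loaded on {device}")
    return model

The tensors built inside predict() would also need to be moved to the same device before the forward pass.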
I am trying to convert a pre-saved PyTorch model into a TensorFlow one via ONNX. For now, the following code exports the model into .onnx format. The neural network has 2 inputs, one hidden layer with 5 neurons and a scalar output.
Here's the code I'm working with:
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np

class Model(nn.Module):
    def __init__(self, n_h_layers, n_h_neurons, dim_in, dim_out, in_bound, out_bound):
        super(Model, self).__init__()
        self.n_h_layers = n_h_layers
        self.n_h_neurons = n_h_neurons
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.in_bound = in_bound
        self.out_bound = out_bound

        layer_input = [nn.Linear(dim_in, n_h_neurons, bias=True)]
        layer_output = [nn.ReLU(), nn.Linear(n_h_neurons, dim_out, bias=True), nn.Hardtanh(in_bound, out_bound)]

        # hidden layers
        module_hidden = [[nn.ReLU(), nn.Linear(n_h_neurons, n_h_neurons, bias=True)] for _ in range(n_h_layers - 1)]
        layer_hidden = list(np.array(module_hidden).flatten())

        # nn model
        layers = layer_input + layer_hidden + layer_output
        self.model = nn.Sequential(*layers)
        print(self.model)

trained_nn = torch.load('path')
trained_model = Model(1, 5, 2, 1, -1, 1)
trained_model.load_state_dict(trained_nn, strict=False)

dummy_input = Variable(torch.randn(1, 2))
torch.onnx.export(trained_model, dummy_input, 'file.onnx', verbose=True)
I have two problems:
Running this snippet raises NotImplementedError in _forward_unimplemented in module.py as follows:
File ".../anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 201, in _forward_unimplemented
raise NotImplementedError
NotImplementedError
I am not familiar with exception handling in Python and I do not know what I must change in order to tackle the error.
When I print trained_nn, this is what it gives me:
OrderedDict([('0.weight',
tensor([[ 0.2035, -0.7679],
[ 1.6368, -0.4135],
[-0.0908, -0.2335],
[ 1.3731, -0.3135],
[ 0.6361, 0.2521]])),
('0.bias', tensor([-1.6907, 0.7262, 1.4032, 1.2551, 0.8013])),
('2.weight',
tensor([[-0.4603, -0.0719, 0.4082, -1.0235, -0.0538]])),
('2.bias', tensor([-1.1568]))])
However, printing trained_model.state_dict() gives me a completely different set of weights and biases, although I believe it should give me the exact same model as before, since that is what I need to save as an ONNX file.
OrderedDict([('model.0.weight',
tensor([[ 0.4817, 0.0928],
[-0.4313, 0.1253],
[ 0.6681, -0.4029],
[ 0.6474, 0.0029],
[-0.4663, 0.5029]])),
('model.0.bias',
tensor([-0.2292, 0.6674, -0.3755, 0.0778, 0.0527])),
('model.2.weight',
tensor([[-0.2097, -0.3029, 0.2792, 0.2596, 0.1362]])),
('model.2.bias', tensor([-0.1835]))])
Not sure what mistakes I'm making. Any help is appreciated.
When you subclass nn.Module you need to implement the forward method. In your case you need to add:
class Model(nn.Module):
    def __init__(self, n_h_layers, n_h_neurons, dim_in, dim_out, in_bound, out_bound):
        super(Model, self).__init__()
        ...

    def forward(self, x):
        return self.model(x)
The names of the parameters do not match:
model.0.weight != 0.weight
model.0.bias != 0.bias
The model. prefix is missing. So when you call load_state_dict() with strict=False, the mismatched parameters are simply not loaded.
You can rename the parameters to match the model:
trained_nn = torch.load('path')
trained_nn = {f'model.{name}': w for name, w in trained_nn.items()}
trained_model.load_state_dict(trained_nn, strict=True)
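As a quick sanity check (just a sketch; 'path' and trained_model are the objects from the question), you can verify that the renamed weights actually ended up in the model before exporting:

renamed = {f'model.{name}': w for name, w in torch.load('path').items()}
trained_model.load_state_dict(renamed, strict=True)

# every parameter in the model should now equal the corresponding checkpoint tensor
for name, param in trained_model.state_dict().items():
    assert torch.equal(param, renamed[name]), f"mismatch in {name}"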
I am having trouble making things work with a custom ParallelEnv I wrote using PettingZoo. I am using SuperSuit's ss.pettingzoo_env_to_vec_env_v1(env) as a wrapper to vectorize the environment and make it work with Stable-Baselines3, as documented here.
Here is a summary of the most relevant parts of the code:
from typing import Optional
from gym import spaces
import random
import numpy as np
from pettingzoo import ParallelEnv
from pettingzoo.utils.conversions import parallel_wrapper_fn
import supersuit as ss
from gym.utils import EzPickle, seeding
def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    # env_ = ss.concat_vec_envs_v1(env_, 1)
    return env_

petting_zoo = env

class parallel_env(ParallelEnv, EzPickle):
    metadata = {'render_modes': ['ansi'], "name": "PlayerEnv-Multi-v0"}

    def __init__(self, n_agents: int = 20, new_step_api: bool = True) -> None:
        EzPickle.__init__(
            self,
            n_agents,
            new_step_api
        )
        self._episode_ended = False
        self.n_agents = n_agents
        self.possible_agents = [
            f"player_{idx}" for idx in range(n_agents)]
        self.agents = self.possible_agents[:]
        self.agent_name_mapping = dict(
            zip(self.possible_agents, list(range(len(self.possible_agents))))
        )
        self.observation_spaces = spaces.Dict(
            {agent: spaces.Box(shape=(len(self.agents),),
                               dtype=np.float64, low=0.0, high=1.0) for agent in self.possible_agents}
        )
        self.action_spaces = spaces.Dict(
            {agent: spaces.Discrete(4) for agent in self.possible_agents}
        )
        self.current_step = 0

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)

    def observation_space(self, agent):
        return self.observation_spaces[agent]

    def action_space(self, agent):
        return self.action_spaces[agent]

    def __calculate_observation(self, agent_id: int) -> np.ndarray:
        return self.observation_space(agent_id).sample()

    def __calculate_observations(self) -> np.ndarray:
        observations = {
            agent: self.__calculate_observation(
                agent_id=agent)
            for agent in self.agents
        }
        return observations

    def observe(self, agent):
        return self.__calculate_observation(agent_id=agent)

    def step(self, actions):
        if self._episode_ended:
            return self.reset()
        observations = self.__calculate_observations()
        rewards = random.sample(range(100), self.n_agents)
        self.current_step += 1
        self._episode_ended = self.current_step >= 100
        infos = {agent: {} for agent in self.agents}
        dones = {agent: self._episode_ended for agent in self.agents}
        rewards = {
            self.agents[i]: rewards[i]
            for i in range(len(self.agents))
        }
        if self._episode_ended:
            self.agents = {}  # To satisfy `set(par_env.agents) == live_agents`
        return observations, rewards, dones, infos

    def reset(self,
              seed: Optional[int] = None,
              return_info: bool = False,
              options: Optional[dict] = None,):
        self.agents = self.possible_agents[:]
        self._episode_ended = False
        self.current_step = 0
        observations = self.__calculate_observations()
        return observations

    def render(self, mode="human"):
        # TODO: IMPLEMENT
        print("TO BE IMPLEMENTED")

    def close(self):
        pass
Unfortunately when I try to test with the following main procedure:
from stable_baselines3 import DQN, PPO
from stable_baselines3.common.env_checker import check_env
from dummy_env import dummy
from pettingzoo.test import parallel_api_test
if __name__ == '__main__':
    # Testing the parallel algorithm alone
    env_parallel = dummy.parallel_env()
    parallel_api_test(env_parallel)  # This works!

    # Testing the environment with the wrapper
    env = dummy.petting_zoo()

    # ERROR: AssertionError: The observation returned by the `reset()` method does not match the given observation space
    check_env(env)

    # Model initialization
    model = PPO("MlpPolicy", env, verbose=1)

    # ERROR: ValueError: could not broadcast input array from shape (20,20) into shape (20,)
    model.learn(total_timesteps=10_000)
I get the following error:
AssertionError: The observation returned by the `reset()` method does not match the given observation space
If I skip check_env() I get the following one:
ValueError: could not broadcast input array from shape (20,20) into shape (20,)
It seems that ss.pettingzoo_env_to_vec_env_v1(env) is capable of splitting the parallel environment into multiple vectorized ones, but not for the reset() function.
Does anyone know how to fix this problem?
Please find the GitHub repository to reproduce the problem.
You should double-check the reset() function in PettingZoo. It will return None instead of an observation like Gym does.
Thanks to a discussion I had in the issue section of the SuperSuit repository, I am able to post the solution to the problem. Thanks to jjshoots!
First of all, it is necessary to have the latest SuperSuit version. In order to get that, I needed to install Stable-Baselines3 using the instructions here to make it work with gym 0.24+.
After that, taking the code in the question as an example, it is necessary to substitute
def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    # env_ = ss.concat_vec_envs_v1(env_, 1)
    return env_
with
def env(**kwargs):
    env_ = parallel_env(**kwargs)
    env_ = ss.pettingzoo_env_to_vec_env_v1(env_)
    env_ = ss.concat_vec_envs_v1(env_, 1, base_class="stable_baselines3")
    return env_
The outcomes are:
Outcome 1: leaving the line with check_env(env) in place, I got the error AssertionError: Your environment must inherit from the gym.Env class, cf. https://github.com/openai/gym/blob/master/gym/core.py
Outcome 2: removing the line with check_env(env), the agent starts training successfully!
In the end, I think that the argument base_class="stable_baselines3" made the difference.
Only the small problem with check_env remains to be reported, but I think it can be considered trivial as long as the training works.
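For reference, a minimal sketch of the resulting training entry point (reusing the dummy module from the question and skipping check_env, which is the step that still fails):

from stable_baselines3 import PPO
from dummy_env import dummy

if __name__ == '__main__':
    # SuperSuit-wrapped, SB3-compatible vectorized environment
    env = dummy.petting_zoo()

    # check_env(env) is skipped here: it still raises the gym.Env inheritance assertion
    model = PPO("MlpPolicy", env, verbose=1)
    model.learn(total_timesteps=10_000)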
I'm working on a math method and, to reduce execution time, I use a numba decorator:
@numba.jit(nopython=True, nogil=True, cache=True)
def analize_tick(data: np.array, index: int, result_signal: np.array) -> None:
    # I perform an action here and then return the result
    result_signal[0] = 1
It works OK, but when I changed the decorator from @numba.jit(nopython=True, nogil=True, cache=True) to @cuda.jit(device=True) I got the error: 'DeviceFunctionTemplate' object is not callable.
Could you advise me how to fix this issue?
BTW, the method receives three arguments:
a 2-dimensional numpy float array
an int index
a 1-dimensional numpy int array where I return the result
UPDATED to add code sample:
import unittest
import pandas as pd
import numpy as np
import numba
from numba import cuda
@numba.jit(nopython=True, nogil=True, cache=True)
# @cuda.jit(device=True)
def calculate(data: np.array, index: int, options: np.array, result_signal: np.array) -> None:
    i = data[0]
    b = data[1]
    result_signal[0] = i + b

@numba.jit(nopython=True, nogil=True, cache=True)
# @cuda.jit(device=True)
def for_each(data: np.array, options: np.array, result: np.array) -> None:
    for index, r in enumerate(data):
        calculate(r, index, options, result)
        # print(result[0])

class cuda_test(unittest.TestCase):
    def test_numba_call(self):
        df = pd.DataFrame([[1, 1], [2, 2]], columns=['c0', 'c1'])
        data = df.to_numpy()
        result = np.array([0], dtype=float)
        options = np.array([0], dtype=float)
        for sigma in range(0, 10, 1):
            options[0] = sigma
            for_each(data, options, result)
Could you advise me how to fix this issue?
There is no way to fix this. What you are trying to do is impossible.
When you decorate a function like this:
@cuda.jit(device=True)
def for_each(data: np.array, options: np.array, result: np.array) -> None:
    for index, r in enumerate(data):
        calculate(r, index, options, result)
you are denoting that the function is only available to be called by CUDA kernels or other device functions. You are not calling it within a CUDA kernel or device function. There is no way to change this behaviour; it is a limitation of the language.
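For illustration, here is a minimal sketch (hypothetical names, and it requires a CUDA-capable GPU) of how a device function is actually consumed: it is called from a @cuda.jit kernel, and the kernel is what the host code launches:

import numpy as np
from numba import cuda

@cuda.jit(device=True)
def calculate(a, b):
    # device function: only callable from kernels or other device functions
    return a + b

@cuda.jit
def kernel(data, result):
    # one thread per row of `data`
    i = cuda.grid(1)
    if i < data.shape[0]:
        result[i] = calculate(data[i, 0], data[i, 1])

data = np.array([[1.0, 1.0], [2.0, 2.0]])
result = np.zeros(data.shape[0])
kernel[1, 32](data, result)  # launch: 1 block of 32 threads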
I want to run multiple strategies in concurrent processes. I came up with something like this:
import logging
import multiprocessing
import os
from sqlalchemy.orm import scoped_session, Session
from pyutil.sql.interfaces.symbols.symbol import Symbol
from pyutil.sql.session import get_one_or_create
class StratRunner(object):
    def __init__(self, session_scope, logger=None):
        assert isinstance(session_scope, scoped_session)
        self.__session_scope = session_scope
        self.__logger = logger or logging.getLogger(__name__)

    # this function is the target for mp.Process
    def _run(self, strategy):
        self.__logger.debug("Pid {pid}".format(pid=os.getpid()))
        symbols = self.symbols
        self.__logger.info("Run strategy {s}".format(s=strategy))
        configuration = strategy.configuration()
        strategy.upsert(portfolio=configuration.portfolio, symbols=symbols, days=5)

    def run_strategies(self):
        # loop over all active strategies!
        jobs = []
        # we are in the main thread here...
        for s in self.active_strategies:
            # what shall I give to the Process? The strategy object, the strategy_id, a session instance, the session_scope...
            job = multiprocessing.Process(target=self._run, kwargs={"strategy": s})
            job.name = s.name
            jobs.append(job)

        run_jobs(jobs, logger=self.__logger)

    @property
    def symbols(self):
        return {s.name: s for s in self.__session_scope().query(Symbol)}

    @property
    def active_strategies(self):
        return self.__session_scope().query(Strategy).filter(Strategy.active == True).all()
I am aware of tons of documentation on this topic but I am overwhelmed.
I loop over the rows of a table (the active_strategies, i.e. class Strategies(Base)...). I then hand the strategy object over to the _run method and update the strategy object within that very same method. Please feel free to shred my code.
I am in particular puzzled about what to give to the _run method. Shall I hand over the strategy object, the strategy ID, the session, the scoped_session, ...?
I have now created a runner object:
import abc
import logging
import os
from sqlalchemy.orm import sessionmaker
class Runner(object):
    __metaclass__ = abc.ABCMeta

    def __init__(self, engine, logger=None):
        self.__engine = engine
        self._logger = logger or logging.getLogger(__name__)
        self.__jobs = []

    @property
    def _session(self):
        """ Create a fresh new session... """
        self.__engine.dispose()
        factory = sessionmaker(self.__engine)
        return factory()

    def _run_jobs(self):
        self._logger.debug("PID main {pid}".format(pid=os.getpid()))

        for job in self.jobs:
            # all jobs get the trigger
            self._logger.info("Job {j}".format(j=job.name))
            job.start()

        for job in self.jobs:
            self._logger.info("Wait for job {j}".format(j=job.name))
            job.join()
            self._logger.info("Job {j} done".format(j=job.name))

    @property
    def jobs(self):
        return self.__jobs

    @abc.abstractmethod
    def run(self):
        """ Described in the child class """
In particular, this class can provide a fresh session (via ._session). However, using this setup I see plenty of:
psycopg2.OperationalError: server closed the connection unexpectedly
    This probably means the server terminated abnormally
    before or while processing the request.
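As an aside, the SQLAlchemy documentation on multiprocessing suggests that pooled connections inherited from the parent process must not be reused; a minimal sketch of that pattern (hypothetical names, not the code from above) disposes the engine inside the child before opening a fresh session:

import multiprocessing
from sqlalchemy.orm import sessionmaker

def _child(engine, strategy_id):
    # discard any pooled connections carried over from the parent process
    engine.dispose()
    session = sessionmaker(bind=engine)()
    try:
        strategy = session.query(Strategy).get(strategy_id)  # re-load the row in this process
        # ... work with `strategy` using this session ...
        session.commit()
    finally:
        session.close()

job = multiprocessing.Process(target=_child, args=(engine, 42))
job.start()
job.join()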
In Django I often assert the number of queries that should be made so that unit tests catch new N+1 query problems:
from django import db
from django.conf import settings
settings.DEBUG=True
class SendData(TestCase):
    def test_send(self):
        db.connection.queries = []
        event = Events.objects.all()[1:]
        s = str(event)  # QuerySet is lazy, force retrieval
        self.assertEquals(len(db.connection.queries), 2)
In SQLAlchemy, tracing to STDOUT is enabled by setting the echo flag on the engine:
engine.echo = True
What is the best way to write tests that count the number of queries made by SQLAlchemy?
class SendData(TestCase):
    def test_send(self):
        event = session.query(Events).first()
        s = str(event)
        self.assertEquals( ... , 2)
I've created a context manager class for this purpose:
import sqlalchemy

class DBStatementCounter(object):
    """
    Use as a context manager to count the number of execute()'s performed
    against the given sqlalchemy connection.

    Usage:
        with DBStatementCounter(conn) as ctr:
            conn.execute("SELECT 1")
            conn.execute("SELECT 1")
        assert ctr.get_count() == 2
    """
    def __init__(self, conn):
        self.conn = conn
        self.count = 0
        # Will have to rely on this since sqlalchemy 0.8 does not support
        # removing event listeners
        self.do_count = False
        sqlalchemy.event.listen(conn, 'after_execute', self.callback)

    def __enter__(self):
        self.do_count = True
        return self

    def __exit__(self, *_):
        self.do_count = False

    def get_count(self):
        return self.count

    def callback(self, *_):
        if self.do_count:
            self.count += 1
Use SQLAlchemy Core Events to log/track queries executed (you can attach the listener from your unit tests so it doesn't impact the performance of the actual application):
event.listen(engine, "before_cursor_execute", catch_queries)
Now you write the function catch_queries; how exactly depends on how you test. For example, you could define this function inside your test method:
def test_something(self):
    stmts = []

    def catch_queries(conn, cursor, statement, *args):  # remaining positional args: parameters, context, executemany
        stmts.append(statement)

    # Now attach it as a listener and work with the collected statements after running your test
The above method is just meant as inspiration. For extended cases you'd probably want a global cache of events that you empty after each test. The reason is that prior to 0.9 (the current dev version) there is no API to remove event listeners. Thus, make one global listener that accesses a global list.
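A minimal end-to-end sketch of that idea (my own assembly, assuming SQLAlchemy 0.9+ so that event.remove() exists; engine, session and Events stand in for your own objects):

from sqlalchemy import event

def test_something(self):
    stmts = []

    def catch_queries(conn, cursor, statement, parameters, context, executemany):
        stmts.append(statement)

    event.listen(engine, "before_cursor_execute", catch_queries)
    try:
        event_row = session.query(Events).first()
        str(event_row)  # force loading
    finally:
        event.remove(engine, "before_cursor_execute", catch_queries)

    self.assertEqual(len(stmts), 1)  # one SELECT expected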
What about the approach of using flask_sqlalchemy.get_debug_queries()? By the way, this is the methodology used internally by the Flask Debug Toolbar; check its source.
from flask_sqlalchemy import get_debug_queries
def test_list_with_assuring_queries_count(app, client):
    with app.app_context():
        # here generating some test data
        for _ in range(10):
            notebook = create_test_scheduled_notebook_based_on_notebook_file(
                db.session, owner='testing_user',
                schedule={"kind": SCHEDULE_FREQUENCY_DAILY}
            )
        for _ in range(100):
            create_test_scheduled_notebook_run(db.session, notebook_id=notebook.id)

    with app.app_context():
        # after resetting the context, call the actual view whose query count we want to assert
        client.get(url_for('notebooks.personal_notebooks'))
        assert len(get_debug_queries()) == 3
Keep in mind that to have the context (and thus the query count) reset, you have to enter with app.app_context() right before the exact piece of code you want to measure.
A slightly modified version of @omar-tarabai's solution that removes the event listener when exiting the context:
from sqlalchemy import event
class QueryCounter(object):
    """Context manager to count SQLAlchemy queries."""

    def __init__(self, connection):
        self.connection = connection.engine
        self.count = 0

    def __enter__(self):
        event.listen(self.connection, "before_cursor_execute", self.callback)
        return self

    def __exit__(self, *args, **kwargs):
        event.remove(self.connection, "before_cursor_execute", self.callback)

    def callback(self, *args, **kwargs):
        self.count += 1
Usage:
with QueryCounter(session.connection()) as counter:
    session.query(XXX).all()
    session.query(YYY).all()

print(counter.count)  # 2