Dash Plotly error: TypeError: Object of type DataFrame is not JSON serializable

Hello, I am working with Dash to build a dashboard.
Below is my code.
I tried to fix the error but was not able to. Can anyone look into this?
In Chrome I get "Error loading layout", and the server raises a TypeError.
import dash
import dash_bootstrap_components as dbc
from dash import dcc
import dash_html_components as html
from dash import dash_table
import pandas as pd
import numpy as np

def getData():
    return preprocess()  # preprocess() is defined elsewhere (not shown in the question)

def back_to_df(dictio):
    return pd.DataFrame.from_dict(dictio)

tblcols = [{"name": i, "id": i} for i in back_to_df(getData()).columns]

app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])

body = html.Div([
    html.H1("Live rates"),
    dbc.Row([
        dbc.Col(html.Div([
            dcc.Interval('graph-update', interval=80, n_intervals=0),
            dash_table.DataTable(
                id='table',
                data=getData(),
                columns=tblcols,
                page_size=10,
                style_table={'overflowX': 'auto'},
            )
        ]), width=3)
    ])
])

app.layout = html.Div([body])

@app.callback(
    dash.dependencies.Output('table', 'data'),
    [dash.dependencies.Input('graph-update', 'n_intervals')])
def updateTable(n):
    return getData()

if __name__ == "__main__":
    app.run_server(debug=False, port=8010)
I am getting the error as follows:
Traceback (most recent call last):
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\_compat.py", line 39, in reraise
raise value
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "C:\Users\Admin\anaconda3\lib\site-packages\dash\dash.py", line 569, in serve_layout
to_json(layout),
File "C:\Users\Admin\anaconda3\lib\site-packages\dash\_utils.py", line 20, in to_json
return to_json_plotly(value)
File "C:\Users\Admin\anaconda3\lib\site-packages\plotly\io\_json.py", line 124, in to_json_plotly
return json.dumps(plotly_object, cls=PlotlyJSONEncoder, **opts)
File "C:\Users\Admin\anaconda3\lib\json\__init__.py", line 234, in dumps
return cls(
File "C:\Users\Admin\anaconda3\lib\site-packages\_plotly_utils\utils.py", line 59, in encode
encoded_o = super(PlotlyJSONEncoder, self).encode(o)
File "C:\Users\Admin\anaconda3\lib\json\encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "C:\Users\Admin\anaconda3\lib\json\encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "C:\Users\Admin\anaconda3\lib\site-packages\_plotly_utils\utils.py", line 136, in default
return _json.JSONEncoder.default(self, obj)
File "C:\Users\Admin\anaconda3\lib\json\encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type DataFrame is not JSON serializable

Sounds like the getData function is returning a pandas DataFrame directly. That won't work: Dash has to serialize the layout to JSON, and the data prop of DataTable expects a list of row dictionaries, not a DataFrame. You'll need to do this:
return df.to_dict(orient='records')
That should work.
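For example, assuming preprocess() returns a DataFrame (it is not shown in the question), a minimal sketch of the fix looks like this:
import pandas as pd

def getData():
    df = preprocess()  # assumed to return a pandas DataFrame
    # a list of row dicts is JSON-serializable, and it is also exactly
    # what dash_table.DataTable expects for its data prop
    return df.to_dict(orient='records')
With this one change, both the initial data=getData() in the layout and the interval callback return serializable data.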

Related

How To Handle CCXT Binance Intermittent Network Error

I stumbled across an issue that causes the script below to throw an error every so often, roughly every other day on average.
The script runs 24/7, and dozens of similar instances run simultaneously. That seems relevant because, as can be seen from the error, it appears to be thrown by another instance (a different asset than the one being retrieved).
OS: Windows 10
Python version: 3.9
CCXT version: 1.54.87
import ccxt
import pandas_ta as ta
import config
import schedule
import pandas as pd
from datetime import datetime
import time
import socket

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 250)

exchange = ccxt.binance({
    'apiKey': config.BINANCE_API_KEY,
    'secret': config.BINANCE_API_SECRET,
    'enableRateLimit': True,
    'options': {
        'defaultType': 'future'
    },
})

in_position = False
free_balance = exchange.fetch_free_balance()
used_balance = exchange.fetch_used_balance()
free_usd = free_balance['USDT']
used_usd = used_balance['USDT']
amount = free_usd + used_usd
quantity = 0
new_quantity = 0

def trigger(df):
    # strategy (omitted in the question)
    ...

def algo():
    print(f"Loading data as of {datetime.now().isoformat()}")
    bars = exchange.fetch_ohlcv('BNB/USDT', timeframe='30m', limit=50)
    df = pd.DataFrame(bars, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
    df['time'] = pd.to_datetime(df['time'], unit='ms')
    df.set_index(pd.DatetimeIndex(df['time']), inplace=True)
    trigger(df)

try:
    schedule.every(2).seconds.do(algo)
    while True:
        schedule.run_pending()
        time.sleep(1)
except ConnectionResetError:
    schedule.every(3).seconds.do(algo)
    while True:
        schedule.run_pending()
        time.sleep(1)
except socket.timeout:
    schedule.every(3).seconds.do(algo)
    while True:
        schedule.run_pending()
        time.sleep(1)
Traceback (most recent call last):
File "C:\Users\", line 699, in urlopen
httplib_response = self._make_request(
File "C:\Users\", line 445, in _make_request
six.raise_from(e, None)
File "<string>", line 3, in raise_from
File "C:\Users\", line 440, in _make_request
httplib_response = conn.getresponse()
File "C:\Users\", line 1349, in getresponse
response.begin()
File "C:\Users\", line 316, in begin
version, status, reason = self._read_status()
File "C:\Users\", line 277, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "C:\Users\", line 704, in readinto
return self._sock.recv_into(b)
File "C:\Users\", line 1241, in recv_into
return self.read(nbytes, buffer)
File "C:\Users\", line 1099, in read
return self._sslobj.read(len, buffer)
ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\", line 439, in send
resp = conn.urlopen(
File "C:\Users\", line 755, in urlopen
retries = retries.increment(
File "C:\Users\", line 532, in increment
raise six.reraise(type(error), error, _stacktrace)
File "C:\Users\", line 769, in reraise
raise value.with_traceback(tb)
File "C:\Users\", line 699, in urlopen
httplib_response = self._make_request(
File "C:\Users\", line 445, in _make_request
six.raise_from(e, None)
File "<string>", line 3, in raise_from
File "C:\Users\", line 440, in _make_request
httplib_response = conn.getresponse()
File "C:\Users\", line 1349, in getresponse
response.begin()
File "C:\Users\", line 316, in begin
version, status, reason = self._read_status()
File "C:\Users\", line 277, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "C:\Users\", line 704, in readinto
return self._sock.recv_into(b)
File "C:\Users\", line 1241, in recv_into
return self.read(nbytes, buffer)
File "C:\Users\", line 1099, in read
return self._sslobj.read(len, buffer)
urllib3.exceptions.ProtocolError: ('Connection aborted.', ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\", line 571, in fetch
response = self.session.request(
File "C:\Users\", line 542, in request
resp = self.send(prep, **send_kwargs)
File "C:\Users\", line 655, in send
r = adapter.send(request, **kwargs)
File "C:\Users\", line 498, in send
raise ConnectionError(err, request=request)
requests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None))
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\", line 79, in <module>
schedule.run_pending()
File "C:\Users\", line 780, in run_pending
default_scheduler.run_pending()
File "C:\Users\", line 100, in run_pending
self._run_job(job)
File "C:\Users\", line 172, in _run_job
ret = job.run()
File "C:\Users\", line 661, in run
ret = self.job_func()
File "C:\Users\", line 67, in algo
bars = exchange.fetch_ohlcv('ADA/USDT', timeframe='15m', limit=300)
File "C:\Users\", line 1724, in fetch_ohlcv
response = getattr(self, method)(self.extend(request, params))
File "C:\Users\", line 463, in inner
return entry(_self, **inner_kwargs)
File "C:\Users\", line 4119, in request
response = self.fetch2(path, api, method, params, headers, body)
File "C:\Users\", line 486, in fetch2
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
File "C:\Users\", line 623, in fetch
raise NetworkError(details) from e
ccxt.base.errors.NetworkError: binance GET https://fapi.binance.com/fapi/v1/klines?symbol=ADAUSDT&interval=15m&limit=300
I got the same problem: my browser was able to access the URL fine, but PyCharm ran into a network error. I am using a proxy to access binance.com; my PyCharm proxy setting is manual, and connection detection is normal.
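If the goal is to keep the scheduler alive through these intermittent drops, one option (a sketch of mine, not taken from the answer above; the retry count and delay are arbitrary) is to catch ccxt.NetworkError around the fetch itself instead of wrapping the scheduling loop:
import time

import ccxt

def fetch_bars_with_retry(exchange, symbol, timeframe='30m', limit=50,
                          retries=3, delay=5):
    # connection resets like WinError 10054 are transient, so retry a few
    # times before giving up
    for attempt in range(retries):
        try:
            return exchange.fetch_ohlcv(symbol, timeframe=timeframe, limit=limit)
        except ccxt.NetworkError as e:
            print(f"Network error (attempt {attempt + 1}/{retries}): {e}")
            time.sleep(delay)
    return None  # let the caller decide what to do after repeated failures
algo() can then skip one cycle when None comes back, instead of letting the exception bubble out of schedule.run_pending().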

How to use marshmallow-sqlalchemy with async code?

I'm trying to use marshmallow-sqlalchemy with aiohttp. I have followed their docs with the basic example, and I'm getting an error.
I have this schema:
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from db.customer import Customer

class CustomerSchema(SQLAlchemyAutoSchema):
    class Meta:
        model = Customer
        include_relationships = True
        load_instance = True
And then the following code for the query:
from sqlalchemy import select
from db import db_conn
from db.customer import Customer
from queries.schema import CustomerSchema

customer_schema = CustomerSchema()

async def get_all_users():
    async with db_conn.get_async_sa_session() as session:
        statement = select(Customer)
        results = await session.execute(statement)
        _ = results.scalars().all()
        print(_)
        response = customer_schema.dump(_, many=True)
        print(response)
For the first print statement I'm getting
[<db.customer.Customer object at 0x10a183340>, <db.customer.Customer object at 0x10a183940>, <db.customer.Customer object at 0x10b0cd9d0>]
But then it fails with
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/util/_concurrency_py3k.py", line 60, in await_only
raise exc.MissingGreenlet(
sqlalchemy.exc.MissingGreenlet: greenlet_spawn has not been called; can't call await_() here. Was IO attempted in an unexpected place? (Background on this error at: http://sqlalche.me/e/14/xd2s)
So how can I use marshmallow-sqlalchemy to serialize the SQLAlchemy response?
Other options (packages, etc.) or a generic custom solution are OK too.
For the time being I'm using this:
statement = select(Customer)
results = await session.execute(statement)
_ = results.scalars().all()
response = {}
for result in _:
    value = {k: (v if not isinstance(v, sqlalchemy.orm.state.InstanceState) else '_')
             for k, v in result.__dict__.items()}
    response[f'customer {value["id"]}'] = value
return response
Full traceback:
Traceback (most recent call last):
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/aiohttp/web_protocol.py", line 422, in _handle_request
resp = await self._request_handler(request)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/aiohttp/web_app.py", line 499, in _handle
resp = await handler(request)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/aiohttp/web_urldispatcher.py", line 948, in _iter
resp = await method()
File "/Users/ruslan/OneDrive/Home/Dev/projects/code/education/other/cft/views/user.py", line 24, in get
await get_all_users()
File "/Users/ruslan/OneDrive/Home/Dev/projects/code/education/other/cft/queries/user.py", line 18, in get_all_users
response = customer_schema.dump(_, many=True)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/marshmallow/schema.py", line 547, in dump
result = self._serialize(processed_obj, many=many)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/marshmallow/schema.py", line 509, in _serialize
return [
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/marshmallow/schema.py", line 510, in <listcomp>
self._serialize(d, many=False)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/marshmallow/schema.py", line 515, in _serialize
value = field_obj.serialize(attr_name, obj, accessor=self.get_attribute)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/marshmallow/fields.py", line 310, in serialize
value = self.get_value(obj, attr, accessor=accessor)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/marshmallow_sqlalchemy/fields.py", line 27, in get_value
return super(fields.List, self).get_value(obj, attr, accessor=accessor)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/marshmallow/fields.py", line 239, in get_value
return accessor_func(obj, check_key, default)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/marshmallow/schema.py", line 472, in get_attribute
return get_value(obj, attr, default)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/marshmallow/utils.py", line 239, in get_value
return _get_value_for_key(obj, key, default)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/marshmallow/utils.py", line 253, in _get_value_for_key
return getattr(obj, key, default)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/orm/attributes.py", line 480, in __get__
return self.impl.get(state, dict_)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/orm/attributes.py", line 931, in get
value = self.callable_(state, passive)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/orm/strategies.py", line 879, in _load_for_state
return self._emit_lazyload(
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/orm/strategies.py", line 1036, in _emit_lazyload
result = session.execute(
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/orm/session.py", line 1689, in execute
result = conn._execute_20(statement, params or {}, execution_options)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1582, in _execute_20
return meth(self, args_10style, kwargs_10style, execution_options)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/sql/lambdas.py", line 481, in _execute_on_connection
return connection._execute_clauseelement(
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1451, in _execute_clauseelement
ret = self._execute_context(
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1813, in _execute_context
self._handle_dbapi_exception(
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1998, in _handle_dbapi_exception
util.raise_(exc_info[1], with_traceback=exc_info[2])
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/util/compat.py", line 207, in raise_
raise exception
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1770, in _execute_context
self.dialect.do_execute(
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/engine/default.py", line 717, in do_execute
cursor.execute(statement, parameters)
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/asyncpg.py", line 449, in execute
self._adapt_connection.await_(
File "/Users/ruslan/.local/share/virtualenvs/cft-RKlbQ9iX/lib/python3.9/site-packages/sqlalchemy/util/_concurrency_py3k.py", line 60, in await_only
raise exc.MissingGreenlet(
sqlalchemy.exc.MissingGreenlet: greenlet_spawn has not been called; can't call await_() here. Was IO attempted in an unexpected place? (Background on this error at: http://sqlalche.me/e/14/xd2s)
The problem in this case is that the Marshmallow schema is configured to load related models (include_relationships=True). Since the initial query doesn't load them, the schema triggers a lazy-load query for them during dump(), and that implicit IO in an async context causes the error.
The simplest solution, demonstrated in the docs, is to eagerly load the related objects with their "parent":
from sqlalchemy import orm, select

async def get_all_users():
    async with db_conn.get_async_sa_session() as session:
        # Let's assume a Customer has a 1-to-many relationship with an Order model
        statement = select(Customer).options(orm.selectinload(Customer.orders))
        results = await session.execute(statement)
        _ = results.scalars().all()
        print(_)
        response = customer_schema.dump(_, many=True)
        print(response)
There is more discussion in the Preventing Implicit IO when Using AsyncSession section of the docs.
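Alternatively (a sketch of mine, not from the answer above, reusing the hypothetical Order model from the comment), the eager load can be configured once on the relationship itself, so every query picks it up without per-statement options:
from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()

class Customer(Base):
    __tablename__ = 'customer'
    id = Column(Integer, primary_key=True)
    # lazy='selectin' makes SQLAlchemy emit a second SELECT ... IN query
    # whenever Customers are loaded, so dump() never triggers a lazy load
    # outside the async context
    orders = relationship('Order', lazy='selectin')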

ValueError: arrays must all be same length - Parse the JSON into Pandas DataFrame

import requests
import json
import pandas as pd

data = requests.get("https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson")
json_data = data.json()

with open(r"C:\path\file.json", 'w') as outfile:
    json.dump(json_data, outfile)

df = pd.read_json(r"C:\path\file.json")
When I try to parse the JSON data into a pandas DataFrame, I get the error below:
ValueError: arrays must all be same length
Can anyone help me out with this?
Traceback (most recent call last):
File "c:/path/file.py", line 29, in <module>
df = pd.read_json(r"C:\path\file.json")
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\util\_decorators.py", line 199, in wrapper
return func(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\util\_decorators.py", line 296, in wrapper
return func(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\json\_json.py", line 618, in read_json
result = json_reader.read()
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\json\_json.py", line 755, in read
obj = self._get_object_parser(self.data)
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\json\_json.py", line 777, in _get_object_parser
obj = FrameParser(json, **kwargs).parse()
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\json\_json.py", line 886, in parse
self._parse_no_numpy()
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\json\_json.py", line 1118, in _parse_no_numpy
self.obj = DataFrame(
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\frame.py", line 468, in __init__
mgr = init_dict(data, index, columns, dtype=dtype)
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\internals\construction.py", line 283, in init_dict
return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\internals\construction.py", line 78, in arrays_to_mgr
index = extract_index(arrays)
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\internals\construction.py", line 397, in extract_index
raise ValueError("arrays must all be same length")
ValueError: arrays must all be same length
A simpler approach is to skip saving to a file and use json_normalize():
import requests
import json
import pandas as pd
data = requests.get("https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson")
json_data = data.json()
pd.json_normalize(json_data["features"])
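A follow-up note of mine (based on the GeoJSON structure of this feed, not part of the answer above): each feature nests its fields under properties and geometry, and json_normalize flattens those into dotted column names:
import requests
import pandas as pd

json_data = requests.get(
    "https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson"
).json()
df = pd.json_normalize(json_data["features"])
# nested fields come out as dotted columns, e.g. "properties.mag",
# "properties.place", "geometry.coordinates"
print([c for c in df.columns if c.startswith("properties.")][:5])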

How can I use ujson as a Flask encoder/decoder?

I have seen that one can use simplejson as a JSON encoder/decoder within a Flask application like this:
from simplejson import JSONEncoder, JSONDecoder
app.json_encoder = JSONEncoder
app.json_decoder = JSONDecoder
But ujson does not have such objects:
>>> from ujson import JSONEncoder
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: cannot import name 'JSONEncoder' from 'ujson' (/.../site-packages/ujson.cpython-38-x86_64-linux-gnu.so
What I tried
I thought of something like this:
from uuid import UUID, uuid4

import ujson as json
from flask import Flask, jsonify
from flask.json import JSONEncoder

class CustomJSONEncoder(JSONEncoder):
    def default(self, obj):
        if isinstance(obj, UUID):
            return str(obj)
        return JSONEncoder.default(self, obj)

    def encode(self, o):
        return json.dumps(o)

app = Flask(__name__)
app.json_encoder = CustomJSONEncoder

@app.route("/")
def index():
    return jsonify({"foo": uuid4()})

app.run()
But I'm uncertain, because the help for the decoder shows this:
| decode(self, s, _w=<built-in method match of re.Pattern object at 0x7f6a608404b0>, _PY3=True)
| Return the Python representation of ``s`` (a ``str`` or ``unicode``
| instance containing a JSON document)
|
| raw_decode(self, s, idx=0, _w=<built-in method match of re.Pattern object at 0x7f6a608404b0>, _PY3=True)
| Decode a JSON document from ``s`` (a ``str`` or ``unicode``
| beginning with a JSON document) and return a 2-tuple of the Python
| representation and the index in ``s`` where the document ended.
| Optionally, ``idx`` can be used to specify an offset in ``s`` where
| the JSON document begins.
|
| This can be used to decode a JSON document from a string that may
| have extraneous data at the end.
Is my implementation OK? How would I support those other parameters? And when is decode used versus raw_decode?
When I run this, I get:
[2020-10-09 10:54:52,063] ERROR in app: Exception on / [GET]
Traceback (most recent call last):
File "/home/moose/.pyenv/versions/3.8.3/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/moose/.pyenv/versions/3.8.3/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/moose/.pyenv/versions/3.8.3/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/moose/.pyenv/versions/3.8.3/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/moose/.pyenv/versions/3.8.3/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/moose/.pyenv/versions/3.8.3/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "main.py", line 28, in index
return jsonify({"foo": uuid4()})
File "/home/moose/.pyenv/versions/3.8.3/lib/python3.8/site-packages/flask/json/__init__.py", line 370, in jsonify
dumps(data, indent=indent, separators=separators) + "\n",
File "/home/moose/.pyenv/versions/3.8.3/lib/python3.8/site-packages/flask/json/__init__.py", line 211, in dumps
rv = _json.dumps(obj, **kwargs)
File "/home/moose/.pyenv/versions/3.8.3/lib/python3.8/site-packages/simplejson/__init__.py", line 398, in dumps
return cls(
File "main.py", line 14, in encode
return json.dumps(o)
TypeError: UUID('1f45a2bc-c964-48f0-b2f5-9ef7a2557966') is not JSON serializable
You can use a try block like this:
import ujson as json
from flask import Flask
from flask.json import JSONEncoder

class CustomJSONEncoder(JSONEncoder):
    def default(self, obj):
        try:
            return json.dumps(obj)
        except TypeError:
            return JSONEncoder.default(self, obj)

app = Flask(__name__)
app.json_encoder = CustomJSONEncoder
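A caveat of mine, not part of the answer above: default() is expected to return a serializable Python object rather than an already-encoded JSON string, and the question's own traceback shows ujson.dumps raising the same TypeError for a UUID, so the try block above may never succeed for that case. A sketch that handles the UUID case directly:
from uuid import UUID

from flask.json import JSONEncoder

class UUIDJSONEncoder(JSONEncoder):
    def default(self, obj):
        if isinstance(obj, UUID):
            return str(obj)  # represent UUIDs as plain strings
        return super().default(obj)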

FileNotFoundError: [Errno 2] File b'Downloads/BetterLifeIndex2015.csv' does not exist: b'Downloads/BetterLifeIndex2015.csv'

Resolved
Answer: I changed the path; it was in fact an incorrect path after all. I used the absolute path (Alt+D, then copy from File Explorer). I also put r before the path so it is treated as a raw string.
# load the data
BetterLifeIndex = pd.read_csv(r"C:\Users\brede\OneDrive\Dokumenter\Downloads\BetterLifeIndex2015.csv",
                              thousands=',')
gdp_per_capita = pd.read_csv(r"C:\Users\brede\OneDrive\Dokumenter\Downloads\gdpcapita.csv",
                             thousands=',', delimiter='\t',
                             encoding='latin1', na_values="n/a")
I'm new to Python and I'm running an example from a machine learning book. I can't get Python to read my CSV file.
Code:
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model

def prepare_country_stats(oecd_bli, gdp_per_capita):
    oecd_bli = oecd_bli[oecd_bli["INEQUALITY"] == "TOT"]
    oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value")
    gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True)
    gdp_per_capita.set_index("Country", inplace=True)
    full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita,
                                  left_index=True, right_index=True)
    full_country_stats.sort_values(by="GDP per capita", inplace=True)
    remove_indices = [0, 1, 6, 8, 33, 34, 35]
    keep_indices = list(set(range(36)) - set(remove_indices))
    return full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices]

# load the data
oecd_bli = pd.read_csv("Downloads/BetterLifeIndex2015.csv", thousands=',')
gdp_per_capita = pd.read_csv("C:/Users/brede/Downloads/gdpcapita.csv", thousands=',', delimiter='\t',
                             encoding='latin1', na_values="n/a")

# prepare the data
country_stats = prepare_country_stats(oecd_bli, gdp_per_capita)
x = np.c_[country_stats["GDP per capita"]]
y = np.c_[country_stats["Life satisfaction"]]

# visualize the data
country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction')

# select a linear model
model = sklearn.linear_model.LinearRegression()

# train the model
model.fit(x, y)

# make a prediction for Cyprus
X_new = [[22587]]  # Cyprus GDP per capita
print(model.predict(X_new))  # outputs [[5.96242338]]
The output is:
runfile('C:/Users/brede/Downloads/practice_gdp.py', wdir='C:/Users/brede/Downloads')
Traceback (most recent call last):
File "<ipython-input-59-2f130edd277c>", line 1, in <module>
runfile('C:/Users/brede/Downloads/practice_gdp.py', wdir='C:/Users/brede/Downloads')
File "C:\Users\brede\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "C:\Users\brede\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/brede/Downloads/practice_gdp.py", line 31, in <module>
oecd_bli = pd.read_csv("Downloads/BetterLifeIndex2015.csv", thousands = ',')
File "C:\Users\brede\Anaconda3\lib\site-packages\pandas\io\parsers.py", line 685, in parser_f
return _read(filepath_or_buffer, kwds)
File "C:\Users\brede\Anaconda3\lib\site-packages\pandas\io\parsers.py", line 457, in _read
parser = TextFileReader(fp_or_buf, **kwds)
File "C:\Users\brede\Anaconda3\lib\site-packages\pandas\io\parsers.py", line 895, in __init__
self._make_engine(self.engine)
File "C:\Users\brede\Anaconda3\lib\site-packages\pandas\io\parsers.py", line 1135, in _make_engine
self._engine = CParserWrapper(self.f, **self.options)
File "C:\Users\brede\Anaconda3\lib\site-packages\pandas\io\parsers.py", line 1917, in __init__
self._reader = parsers.TextReader(src, **kwds)
File "pandas\_libs\parsers.pyx", line 382, in pandas._libs.parsers.TextReader.__cinit__
File "pandas\_libs\parsers.pyx", line 689, in pandas._libs.parsers.TextReader._setup_parser_source
FileNotFoundError: [Errno 2] File b'Downloads/BetterLifeIndex2015.csv' does not exist: b'Downloads/BetterLifeIndex2015.csv'
I have triple-checked the path to the file and I can't seem to figure this out! All help is appreciated.
This was done in Spyder; I also tried it in Jupyter with the same result. I've even copied the path, etc.
I think you have to include '/' in the file path. Try 'C:/Users/brede/OneDrive/...'.
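A more portable variant (my sketch, assuming the file really lives in the user's Downloads folder): build the path from the home directory, so the script does not depend on the current working directory, which is what a bare "Downloads/..." relies on.
from pathlib import Path

import pandas as pd

# resolve the CSV relative to the user's home directory instead of
# whatever directory the script happens to be run from
csv_path = Path.home() / "Downloads" / "BetterLifeIndex2015.csv"
oecd_bli = pd.read_csv(csv_path, thousands=',')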