Using 'extendData' in a 'dcc.Interval' event will only update my graph when I'm browsing another Chrome tab - plotly-dash

I created a very simple (one page) application in Dash that appends random data to a plotly chart using a dcc.Interval component and the extendData method (I'd like to cap the chart at a fixed maximum number of x values).
The program worked like a charm, until I tried to port it to a multi-page application:
I used the following example:
https://github.com/facultyai/dash-bootstrap-components/blob/main/examples/python/templates/multi-page-apps/responsive-collapsible-sidebar/sidebar.py
and replaced:
elif pathname == "/page-1":
return html.P("This is the content of page 1. Yay!")
with:
import page_1
...
elif pathname == "/page-1":
return page_1.layout
My page_1.py contains the following code:
from dash import dcc, html
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
# Page layout: a card holding the live graph (four empty traces that the
# interval callback extends via 'extendData') and the 100 ms timer that
# drives the updates.
layout = dbc.Card(dbc.CardBody([
    html.H4('Live Feed'),
    dcc.Graph(id='live-update-graph',
        # Start with four empty traces; new points are appended client-side.
        figure=go.Figure({'data':
            [
                {'x': [], 'y': []},
                {'x': [], 'y': []},
                {'x': [], 'y': []},
                {'x': [], 'y': []}
            ]
        }),
    ),
    dcc.Interval(
        id='interval-component',
        interval=0.1*1000, # in milliseconds
        n_intervals=0
    )
]
))
I put my Callback in my app.py file:
# NOTE(review): the leading '#' on the next line is a copy/paste artifact --
# in the real source this is the '@app.callback(...)' decorator that wires
# the Interval ticks to the graph's 'extendData' property.
#app.callback(Output('live-update-graph', 'extendData'),
Input('interval-component', 'n_intervals')
)
def update_graph_live(n):
    """Produce one new point per trace: y1 is a normal sample, y2..y4 are
    random offsets from it (a loose random walk)."""
    # Collect some data
    y1 = np.random.normal(loc = 10, scale=10)
    y2 = y1 + random.randint(-5, 5)
    y3 = y2 + random.randint(-10, 60)
    y4 = y3 + random.randint(-40, 2)
    # extendData triple: new points per trace, the trace indices to extend
    # (0-3), and the max number of points kept per trace (300).
    return [{'x': [[datetime.datetime.now()]] * 4,'y': [[y1], [y2], [y3], [y4]]}, [0,1, 2, 3], 300]
...
if __name__ == '__main__':
app.run_server(debug=True)
Unfortunately, my chart will only update when I'm browsing another tab in Chrome, not when I'm displaying it.
I have another page with some other components and an associated callback declared in my app.py file as :
# NOTE(review): '#app.callback' is '@app.callback' in the actual source --
# another Markdown copy artifact.
#app.callback(
Output("result-code", "children"),
Input("slider", "value"),
)
def create_python_script(slider):
    """Rebuild the displayed Python snippet whenever the slider moves.

    The returned string is rendered by a Markdown component; it embeds the
    current slider value into a small message template.
    """
    # markdown_start / markdown_end are defined elsewhere in app.py.
    markdown = markdown_start
    markdown += '''
msg = {{
"slider_value": {slider}
}}'''.format(slider=slider)
    markdown += markdown_end
    return markdown
And my Markdown component is updated in real-time, no problem with that.
Here is a copy of my callback status:
Callback status in Dash
My developer console shows every incoming message in the front-end part:
{
"multi": true,
"response": {
"live-update-graph": {
"extendData": [
{
"x": [
[
"2023-02-13T16:58:37.533426"
],
[
"2023-02-13T16:58:37.533426"
],
[
"2023-02-13T16:58:37.533426"
],
[
"2023-02-13T16:58:37.533426"
]
],
"y": [
[
-4.26648933108117
],
[
-3.2664893310811696
],
[
-8.26648933108117
],
[
-9.26648933108117
]
]
},
[
0,
1,
2,
3
],
300
]
}
}
}
Am I doing something wrong?
Thanks in advance !

I was using http://localhost:8888 instead of http://127.0.0.1:8888 to connect to my web app and somehow didn't see it, and that was preventing the chart from being updated.

Related

Dash Tabulator : "movableRowsConnectedTables" is not working

I’m trying to use the "movableRowsConnectedTables" built-in functionality as explained in the tabulator.js examples
It doesn’t seem to work as expected:
import dash
from dash import html
import dash_bootstrap_components as dbc
import dash_tabulator
# Column definitions shared by both tables.
columns = [
    { "title": "Name",
    "field": "name",}
]
# Source table: rows may be dragged out; declares 'tabulator_to' as the
# connected drop target.
options_from = {
    'movableRows' : True,
    'movableRowsConnectedTables':"tabulator_to",
    'movableRowsReceiver': "add",
    'movableRowsSender': "delete",
    'height':200,
    'placeholder':'No more Rows'
}
# Target table options.
# NOTE(review): per the answer below, this likely also needs
# 'movableRowsConnectedTables': "tabulator_from" for the link to work.
options_to = {
    'movableRows' : True,
    'height':200,
    'placeholder':'Drag Here'
}
data = [
    {"id":1, "name":"a"},
    {"id":2, "name":"b"},
    {"id":3, "name":"c"},
]
# Two side-by-side tables: drag rows from the left one into the right one.
layout = html.Div(
    [
        dbc.Row(
            [
                dbc.Col(
                    [ html.Header('DRAG FROM HERE'),
                    dash_tabulator.DashTabulator(
                        id='tabulator_from',
                        columns=columns,
                        options=options_from,
                        data=data,
                    ),
                    ], width = 6
                ),
                dbc.Col(
                    [ html.Header('DROP HERE'),
                    dash_tabulator.DashTabulator(
                        id='tabulator_to',
                        columns=columns,
                        options=options_to,
                        data = []
                    ),
                    ], width = 6
                )
            ]
        )
    ]
)
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = dbc.Container(layout, fluid=True)
if __name__ == '__main__':
    app.run_server(debug=True)
Is it also possible to get callbacks when elements were dropped?
It would be great to have this functionality inside dash!
example
I'm not familiar with dash_tabulator, but the table you're sending to also needs a 'movableRowsConnectedTables': "tabulator_from" option

Populating a Dash data_table column with switches

Is it possible to populate the column Toggle with ‘mantine’ switches?
I am using the code from the docs that show how to populate a column with dropdowns, but I cannot figure out how to get any further than this.
import dash, dash_table
import dash_html_components as html
import dash_mantine_components as dmc
import pandas as pd
app = dash.Dash(__name__)

# Sample data for the three plain columns.
df = pd.DataFrame({
    'A': [1, 2],
    'B': [4, 5],
    'C': [7, 8]
})

# Builds one Mantine switch; the open question is how to render these
# inside the DataTable's "Toggle" column (DataTable cells do not host
# arbitrary components out of the box).
def toggle_switch():
    return dmc.Switch(
        size="lg",
        radius="sm",
        label="Enable this option",
        checked=True
    )

app.layout = html.Div([
    dash_table.DataTable(
        id='datatable',
        # The dataframe columns plus one extra, initially empty "Toggle" column.
        columns=[
            {"name": i, "id": i} for i in df.columns
        ] + [{"name": "Toggle", "id": "toggle"}],
        data=df.to_dict('records'),
        editable=True,
    )
])

if __name__ == '__main__':
    app.run_server(debug=True)

Finding Values in Python json.loads Dictionary

I'm working with a REST API that returns data in the following format:
{
"id": "2902cbad6da44459ad05abd1305eed14",
"displayName": "",
"sourceHost": "dev01.test.lan",
"sourceIP": "192.168.145.1",
"messagesPerSecond": 0,
"messages": 2733,
"size": 292062,
"archiveSize": 0,
"dates": [
{
"date": 1624921200000,
"messages": 279,
"size": 29753,
"archiveSize": 0
},
{
"date": 1625007600000,
"messages": 401,
"size": 42902,
"archiveSize": 0
}
]
}
I'm using json.loads to successfully pull the data from the API, and I now need to search for a particular "date:" value and read the corresponding "messages", "size" and "archiveSize" values.
I'm trying to use the "if-in" method to find the value I'm interested in, for example:
# Fetch and parse the API response.
response = requests.request("GET", apiQuery, headers=headers, data=payload)
json_response = json.loads(response.text)
test = 2733
# NOTE(review): dict.values() only inspects TOP-LEVEL values, which is why
# entries inside the nested "dates" list are never found.
if test in json_response.values():
    print(f"Yes, value: '{test}' exist in dictionary")
else:
    print(f"No, value: '{test}' does not exist in dictionary")
This works fine for any value in the top section of the JSON return, but it never finds any values in the "dates" sub-branches.
I have two questions, firstly, how do I find the target "date" value? Secondly, once I find that "sub-branch" what would be the best way to extract the three values I need?
Thanks.
from json import load
def list_dates_whose_message_count_equals(dates=None, message_count=0):
    """Return the entries of *dates* whose "messages" field equals
    *message_count* (entries without that key never match)."""
    return [entry for entry in dates if entry.get("messages") == message_count]
# Demo driver: load the saved API response from a local file and look up
# the "dates" entries for two known message counts.
def main():
    json_ = {}
    with open("values.json", "r") as fp:
        json_ = load(fp)
    print(list_dates_whose_message_count_equals(json_["dates"], message_count=279))
    print(list_dates_whose_message_count_equals(json_["dates"], message_count=401))
if __name__ == "__main__":
    main()
Returns this
[{'date': 1624921200000, 'messages': 279, 'size': 29753, 'archiveSize': 0}]
[{'date': 1625007600000, 'messages': 401, 'size': 42902, 'archiveSize': 0}]

Writing Nested JSON Dictionary List To CSV

Issue
I'm trying to write the following nested list of dictionary which has another list of dictionary to csv. I tried multiple ways but I can not get it to properly write it:
Json Data
[
{
"Basic_Information_Source": [
{
"Image": "image1.png",
"Image_Format": "PNG",
"Image_Mode": "RGB",
"Image_Width": 574,
"Image_Height": 262,
"Image_Size": 277274
}
],
"Basic_Information_Destination": [
{
"Image": "image1_dst.png",
"Image_Format": "PNG",
"Image_Mode": "RGB",
"Image_Width": 574,
"Image_Height": 262,
"Image_Size": 277539
}
],
"Values": [
{
"Value1": 75.05045463635267,
"Value2": 0.006097560975609756,
"Value3": 0.045083481733371615,
"Value4": 0.008639858263904898
}
]
},
{
"Basic_Information_Source": [
{
"Image": "image2.png",
"Image_Format": "PNG",
"Image_Mode": "RGB",
"Image_Width": 1600,
"Image_Height": 1066,
"Image_Size": 1786254
}
],
"Basic_Information_Destination": [
{
"Image": "image2_dst.png",
"Image_Format": "PNG",
"Image_Mode": "RGB",
"Image_Width": 1600,
"Image_Height": 1066,
"Image_Size": 1782197
}
],
"Values": [
{
"Value1": 85.52662890580055,
"Value2": 0.0005464352720450282,
"Value3": 0.013496113910369758,
"Value4": 0.003800236380811839
}
]
}
]
Working Code
I tried to use the following code and it works, but it only saved the headers and then dumps all the underlying list as text in the csv file:
import json
import csv
def Convert_CSV():
    """Load 'analysis_results_enc.json' and write its records to 'test.csv'
    with one column per top-level key.

    Note: nested list values are written as their repr() text --
    csv.DictWriter does not flatten them (the behaviour the post asks about).
    """
    # Use a context manager so the input handle is closed even if
    # json.load() raises; the original leaked the handle on error.
    with open('analysis_results_enc.json', 'r') as ar_enc_file:
        json_data = json.load(ar_enc_file)
    keys = json_data[0].keys()
    with open('test.csv', 'w', encoding='utf8', newline='') as output_file:
        dict_writer = csv.DictWriter(output_file, keys)
        dict_writer.writeheader()
        dict_writer.writerows(json_data)
Convert_CSV()
Working Output / Issue with it
The output writes the following header:
Basic_Information_Source
Basic_Information_Destination
Values
And then it dumps all other data inside each header as a list like this:
[{'Image': 'image1.png', 'Image_Format': 'PNG', 'Image_Mode': 'RGB', 'Image_Width': 574, 'Image_Height': 262, 'Image_Size': 277274}]
Expected Output / Sample
Trying to generate the above type of output for each dictionary in the array of dictionaries.
How do I properly write it?
I'm sure someone will come by with a much more elegant solution. That being said:
You have a few problems.
You have inconsistent entries with the fields you want to align.
Even if you pad your data you have intermediate lists that need flattened out.
Then you still have separated data that needs to be merged together.
DictWriter AFAIK expects its data in the format of [{'column': 'entry'}, {'column': 'entry'}], so even if you do all the previous steps you're still not in the right format.
So let's get started.
For the first two parts we can combine.
def pad_list(lst, size, padding=None):
    """Return a copy of *lst* extended with *padding* up to *size* items.

    A list that is already *size* or longer is returned as an unmodified
    copy -- the input is never truncated or mutated.
    """
    shortfall = max(0, size - len(lst))
    return lst + [padding] * shortfall
# Expects already-parsed JSON data (a list of dicts whose values are
# one-element lists of dicts).
def flatten(json_data):
    """Reduce each record's wrapper lists to flat value lists.

    For every key, take the first dict in its list (or nothing, if the
    list is empty) and keep just its values; then pad every value list in
    the record with None up to the record's longest list so lengths are
    consistent.
    """
    rows = []
    for record in json_data:
        columns = {
            key: list(next(iter(wrapped), dict()).values())
            for key, wrapped in record.items()
        }
        # Longest value list in this record (0 for an empty record).
        width = max((len(vals) for vals in columns.values()), default=0)
        rows.append({
            key: vals + [None] * (width - len(vals))
            for key, vals in columns.items()
        })
    return rows
So now we have a flattened list of dicts whose values are lists of consistent length. Essentially:
[
{
"Basic_Information_Source": [
"image1.png",
"PNG",
"RGB",
574,
262,
277274
],
"Basic_Information_Destination": [
"image1_dst.png",
"PNG",
"RGB",
574,
262,
277539
],
"Values": [
75.05045463635267,
0.006097560975609756,
0.045083481733371615,
0.008639858263904898,
None,
None
]
}
]
But this list has multiple dicts that need to be merged, not just one.
So we need to merge.
# Concatenate the per-record column lists into a single mapping.
def merge(flattened):
    """Combine a list of {column: [values]} dicts into one dict whose
    lists are the concatenation, in order, of all records' lists."""
    combined = dict()
    for record in flattened:
        for column, values in record.items():
            combined.setdefault(column, []).extend(values)
    return combined
This gives us something close to this:
{
"Basic_Information_Source": [
"image1.png",
"PNG",
"RGB",
574,
262,
277274,
"image2.png",
"PNG",
"RGB",
1600,
1066,
1786254
],
"Basic_Information_Destination": [
"image1_dst.png",
"PNG",
"RGB",
574,
262,
277539,
"image2_dst.png",
"PNG",
"RGB",
1600,
1066,
1782197
],
"Values": [
75.05045463635267,
0.006097560975609756,
0.045083481733371615,
0.008639858263904898,
None,
None,
85.52662890580055,
0.0005464352720450282,
0.013496113910369758,
0.003800236380811839,
None,
None
]
}
But wait, we still need to format it for the writer.
Our data needs to be in the format of [{'column_1': 'entry', 'column_2': 'entry'}, {'column_1': 'entry', 'column_2': 'entry'}]
So we format:
def format_for_writer(merged):
    """Transpose {column: [values...]} into a list of row dicts suitable
    for csv.DictWriter.

    Row i collects the i-th value of every column; columns shorter than
    the longest one simply omit the key in the trailing rows.
    """
    rows = []
    for column, values in merged.items():
        # Grow the row list lazily so it ends up as long as the longest column.
        while len(rows) < len(values):
            rows.append(dict())
        for row, value in zip(rows, values):
            row[column] = value
    return rows
So finally, we have a nice clean formatted data structure we can just hand to our writer function.
def convert_csv(formatted):
    """Write the row dicts in *formatted* to 'test.csv', using the keys of
    the first row as the header."""
    fieldnames = formatted[0].keys()
    with open('test.csv', 'w', encoding='utf8', newline='') as handle:
        writer = csv.DictWriter(handle, fieldnames)
        writer.writeheader()
        writer.writerows(formatted)
Full code with json string:
import json
import csv
json_raw = """\
[
{
"Basic_Information_Source": [
{
"Image": "image1.png",
"Image_Format": "PNG",
"Image_Mode": "RGB",
"Image_Width": 574,
"Image_Height": 262,
"Image_Size": 277274
}
],
"Basic_Information_Destination": [
{
"Image": "image1_dst.png",
"Image_Format": "PNG",
"Image_Mode": "RGB",
"Image_Width": 574,
"Image_Height": 262,
"Image_Size": 277539
}
],
"Values": [
{
"Value1": 75.05045463635267,
"Value2": 0.006097560975609756,
"Value3": 0.045083481733371615,
"Value4": 0.008639858263904898
}
]
},
{
"Basic_Information_Source": [
{
"Image": "image2.png",
"Image_Format": "PNG",
"Image_Mode": "RGB",
"Image_Width": 1600,
"Image_Height": 1066,
"Image_Size": 1786254
}
],
"Basic_Information_Destination": [
{
"Image": "image2_dst.png",
"Image_Format": "PNG",
"Image_Mode": "RGB",
"Image_Width": 1600,
"Image_Height": 1066,
"Image_Size": 1782197
}
],
"Values": [
{
"Value1": 85.52662890580055,
"Value2": 0.0005464352720450282,
"Value3": 0.013496113910369758,
"Value4": 0.003800236380811839
}
]
}
]
"""
def pad_list(lst, size, padding=None):
    """Copy *lst* and append *padding* until it holds *size* items.

    Lists already at or beyond *size* come back as plain copies; the
    original is never mutated.
    """
    padded = list(lst)
    while len(padded) < size:
        padded.append(padding)
    return padded
def flatten(json_data):
    """For each record, unwrap the first dict of every one-element list
    value into a flat list of its values, padding all lists in the record
    with None to the record's longest length."""
    result = []
    for record in json_data:
        extracted = {}
        for key, wrapped in record.items():
            first = next(iter(wrapped), dict())
            extracted[key] = list(first.values())
        longest = max((len(v) for v in extracted.values()), default=0)
        result.append({
            key: values + [None] * (longest - len(values))
            for key, values in extracted.items()
        })
    return result
def merge(flattened):
    """Fold a list of {column: [values]} dicts into one combined dict,
    concatenating lists in encounter order."""
    combined = {}
    for record in flattened:
        for column, values in record.items():
            bucket = combined.get(column)
            if bucket is None:
                bucket = combined[column] = []
            bucket.extend(values)
    return combined
def format_for_writer(merged):
    """Turn {column: [values...]} into DictWriter-ready row dicts, where
    row i holds the i-th value of each column that is long enough."""
    rows = []
    for column, values in merged.items():
        for index, value in enumerate(values):
            if index >= len(rows):
                rows.append(dict())
            rows[index][column] = value
    return rows
def convert_csv(formatted):
    """Dump the prepared row dicts to 'test.csv'; the first row's keys
    become the header."""
    header = formatted[0].keys()
    with open('test.csv', 'w', encoding='utf8', newline='') as sink:
        dict_writer = csv.DictWriter(sink, header)
        dict_writer.writeheader()
        dict_writer.writerows(formatted)
# Driver: parse the embedded JSON sample, then run the
# flatten -> merge -> format pipeline and write the result to test.csv.
def main():
    json_data = json.loads(json_raw)
    flattened = flatten(json_data)
    merged = merge(flattened)
    formatted = format_for_writer(merged)
    convert_csv(formatted)
if __name__ == '__main__':
    main()

Can we create JSON using JsonBuilder for the following JSON?

I want to create the below JSON using JsonBuilder.
"isOut": false,
"baleRun": {
"incData": true,
"appendCricket": [{
"min": 10,
"max": 32,
"price": "10"
}]
}
I have tried below code to create it:-
// Failing attempt from the question: build {"baleRun": {...}} with a nested array.
import groovy.json.*
def builder = new JsonBuilder()
def root = builder.baleRun{
    incData true
    // NOTE(review): 'builder.appendCricket [...]' is parsed as a property
    // read on the builder, which raises MissingPropertyException -- it
    // needs to be a method call: appendCricket([...]).
    builder.appendCricket [
        {
            min 10
            max 32
            price "10000"
        }
    ]
}
Getting below error:-
groovy.lang.MissingPropertyException: No such property: appendCricket for
class: groovy.json.JsonBuilder error.
Any idea how to produce this?
The simplest way is to build a Map for the data you want, then pass this to the builder in the constructor:
// Simplest approach: assemble the whole structure as nested Maps/Lists
// and let JsonBuilder serialise it in one pass.
import groovy.json.*
def data = [
    isOut: false,
    baleRun: [
        incData: true,
        appendCricket: [
            [min: 10, max: 32, price: '10']
        ]
    ]
]
def json = new JsonBuilder(data).toString()
It is easy to create it using map, like #tim_yates suggested.
Of course, it is also possible to create the way you started with. Should be taken care for array, see in line:
// Closure-based variant of the same document. The array-valued key must be
// written as a method call -- appendCricket([...]) -- not a bare property
// reference, which is what broke the original attempt.
def json = new groovy.json.JsonBuilder()
json {
    isOut false
    baleRun {
        incData true
        appendCricket( [
            {
                min 10
                max 32
                price "10000"
            }
        ])
    }
}
println json.toPrettyString()
Quickly try the same online demo