How to extend the ‘text’ field in Graph.extendData with Plotly-Dash?

I have an animated graph that I update in a clientside callback. However, I want to update the text as well as the x and y values of the traces in Graph.extendData(), but that doesn't seem to work. Is there something I'm missing? Alternatively, is there a different method I should be using instead?
Adapting the code from this post (Plotly/Dash display real time data in smooth animation), I'd like something like the following, but with updating the text via extendData actually working:
import dash
import dash_html_components as html
import dash_core_components as dcc
import numpy as np
from dash.dependencies import Input, Output, State
# Example data (a circle).
resolution = 1000
t = np.linspace(0, np.pi * 2, resolution)
x, y = np.cos(t), np.sin(t)
text = str(t)
# Example app.
figure = dict(data=[{'x': [], 'y': []}], text = [], layout=dict(xaxis=dict(range=[-1, 1]), yaxis=dict(range=[-1, 1])))
app = dash.Dash(__name__, update_title=None) # remove "Updating..." from title
app.layout = html.Div([
    dcc.Graph(id='graph', figure=dict(figure)), dcc.Interval(id="interval", interval=25),
    dcc.Store(id='offset', data=0), dcc.Store(id='store', data=dict(x=x, y=y, text=text, resolution=resolution)),
])
# This makes the graph fail to draw instead of just extending the text as well!
app.clientside_callback(
    """
    function (n_intervals, data, offset) {
        offset = offset % data.x.length;
        const end = Math.min((offset + 10), data.x.length);
        return [[{x: [data.x.slice(offset, end)], y: [data.y.slice(offset, end)], text: [data.text.slice(offset, end)]}, [0], 500], end]
    }
    """,
    [Output('graph', 'extendData'), Output('offset', 'data')],
    [Input('interval', 'n_intervals')], [State('store', 'data'), State('offset', 'data')]
)
if __name__ == '__main__':
    app.run_server()
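One thing that may be going wrong (untested sketch; this assumes extendData, which forwards to Plotly's extendTraces, can only extend array-valued attributes that already exist on the trace): in the code above, text is declared at the figure level rather than inside the trace, and str(t) produces one long string instead of a list of strings. A variant that keeps the clientside callback unchanged would be:
# Untested sketch: keep 'text' inside the trace so extendData has an array to extend,
# and store the hover text in the dcc.Store as a list of strings.
text = [f'{v:.3f}' for v in t]
figure = dict(
    data=[{'x': [], 'y': [], 'text': []}],
    layout=dict(xaxis=dict(range=[-1, 1]), yaxis=dict(range=[-1, 1])),
)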

Related

Why is RandomCrop with size 84 and padding 8 returning an image size of 84 and not 100 in pytorch?

I was using the mini-ImageNet data set and noticed this code:
elif data_augmentation == 'lee2019':
    normalize = Normalize(
        mean=[120.39586422 / 255.0, 115.59361427 / 255.0, 104.54012653 / 255.0],
        std=[70.68188272 / 255.0, 68.27635443 / 255.0, 72.54505529 / 255.0],
    )
    train_data_transforms = Compose([
        ToPILImage(),
        RandomCrop(84, padding=8),
        ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
        RandomHorizontalFlip(),
        ToTensor(),
        normalize,
    ])
    test_data_transforms = Compose([
        normalize,
    ])
but when I checked the image size it was 84 instead of 100 (after adding padding):
X.size()=torch.Size([50, 3, 84, 84])
What is going on here? Shouldn't it be 100?
reproduction:
import random
from typing import Callable
import learn2learn as l2l
import numpy as np
import torch
from learn2learn.data import TaskDataset, MetaDataset, DataDescription
from learn2learn.data.transforms import TaskTransform
from torch.utils.data import Dataset
class IndexableDataSet(Dataset):
    def __init__(self, datasets):
        self.datasets = datasets

    def __len__(self) -> int:
        return len(self.datasets)

    def __getitem__(self, idx: int):
        return self.datasets[idx]


class SingleDatasetPerTaskTransform(Callable):
    """
    Transform that samples a data set first, then creates a task (e.g. n-way, k-shot) and finally
    applies the remaining task transforms.
    """
    def __init__(self, indexable_dataset: IndexableDataSet, cons_remaining_task_transforms: Callable):
        """
        :param cons_remaining_task_transforms: constructor that builds the remaining task transforms. Cannot be a list
        of transforms because we don't know a priori which data set we will use. So this function should be of
        type MetaDataset -> list[TaskTransforms], i.e. given the dataset it returns the transforms for it.
        """
        self.indexable_dataset = MetaDataset(indexable_dataset)
        self.cons_remaining_task_transforms = cons_remaining_task_transforms

    def __call__(self, task_description: list):
        """
        Idea:
        - receive the index of the dataset to use
        - then use the normal NWays l2l function
        """
        # - this is what I wish could have gone in a separate callable transform, but I don't know how, since the transforms take the data set to use a priori (not dynamically).
        i = random.randint(0, len(self.indexable_dataset) - 1)
        task_description = [DataDescription(index=i)]  # using this to follow the l2l convention
        # - get the sampled data set
        dataset_index = task_description[0].index
        dataset = self.indexable_dataset[dataset_index]
        dataset = MetaDataset(dataset)
        # - use the sampled data set to create task
        remaining_task_transforms: list[TaskTransform] = self.cons_remaining_task_transforms(dataset)
        description = None
        for transform in remaining_task_transforms:
            description = transform(description)
        return description


def sample_dataset(dataset):
    def sample_random_dataset(x):
        print(f'{x=}')
        i = random.randint(0, len(dataset) - 1)
        return [DataDescription(index=i)]
        # return dataset[i]
    return sample_random_dataset


def get_task_transforms(dataset: IndexableDataSet) -> list[TaskTransform]:
    """
    :param dataset:
    :return:
    """
    transforms = [
        sample_dataset(dataset),
        l2l.data.transforms.NWays(dataset, n=5),
        l2l.data.transforms.KShots(dataset, k=5),
        l2l.data.transforms.LoadData(dataset),
        l2l.data.transforms.RemapLabels(dataset),
        l2l.data.transforms.ConsecutiveLabels(dataset),
    ]
    return transforms


def print_datasets(dataset_lst: list):
    for dataset in dataset_lst:
        print(f'\n{dataset=}\n')


def get_indexable_list_of_datasets_mi_and_cifarfs(root: str = '~/data/l2l_data/') -> IndexableDataSet:
    from learn2learn.vision.benchmarks import mini_imagenet_tasksets
    datasets, transforms = mini_imagenet_tasksets(root=root)
    mi = datasets[0].dataset

    from learn2learn.vision.benchmarks import cifarfs_tasksets
    datasets, transforms = cifarfs_tasksets(root=root)
    cifarfs = datasets[0].dataset

    dataset_list = [mi, cifarfs]
    dataset_list = [l2l.data.MetaDataset(dataset) for dataset in dataset_list]
    dataset = IndexableDataSet(dataset_list)
    return dataset
# -- tests
def loop_through_l2l_indexable_datasets_test():
    """
    :return:
    """
    # - for determinism
    random.seed(0)
    torch.manual_seed(0)
    np.random.seed(0)

    # - options for number of tasks/meta-batch size
    batch_size: int = 10

    # - create indexable data set
    indexable_dataset: IndexableDataSet = get_indexable_list_of_datasets_mi_and_cifarfs()

    # - get task transforms
    def get_remaining_transforms(dataset: MetaDataset) -> list[TaskTransform]:
        remaining_task_transforms = [
            l2l.data.transforms.NWays(dataset, n=5),
            l2l.data.transforms.KShots(dataset, k=5),
            l2l.data.transforms.LoadData(dataset),
            l2l.data.transforms.RemapLabels(dataset),
            l2l.data.transforms.ConsecutiveLabels(dataset),
        ]
        return remaining_task_transforms
    task_transforms: TaskTransform = SingleDatasetPerTaskTransform(indexable_dataset, get_remaining_transforms)

    # -
    taskset: TaskDataset = TaskDataset(dataset=indexable_dataset, task_transforms=task_transforms)

    # - loop through tasks
    for task_num in range(batch_size):
        print(f'{task_num=}')
        X, y = taskset.sample()
        print(f'{X.size()=}')
        print(f'{y.size()=}')
        print(f'{y=}')
        print()

    print('-- end of test --')

# -- Run experiment
if __name__ == "__main__":
    import time
    from uutils import report_times
    start = time.time()
    # - run experiment
    loop_through_l2l_indexable_datasets_test()
    # - Done
    print(f"\nSuccess Done!: {report_times(start)}\a")
context: https://github.com/learnables/learn2learn/issues/333
crossposted:
https://discuss.pytorch.org/t/why-is-randomcrop-with-size-84-and-padding-8-returning-an-image-size-of-84-and-not-100-in-pytorch/151463
https://www.reddit.com/r/pytorch/comments/uno1ih/why_is_randomcrop_with_size_84_and_padding_8/
The padding is applied to the input image or tensor before the random crop. The output image therefore has a spatial size equal to the size(s) given to T.RandomCrop, since the crop is performed after the padding.
After all, it makes more sense to pad the input image rather than the cropped image, doesn't it?
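A quick way to see this is to apply the transform to a dummy 84x84 image and check the output shape (a minimal sketch using the PIL path, as in the pipeline above):
from PIL import Image
import numpy as np
from torchvision.transforms import RandomCrop, ToTensor

img = Image.fromarray(np.zeros((84, 84, 3), dtype=np.uint8))  # dummy 84x84 "mini-ImageNet" image
out = ToTensor()(RandomCrop(84, padding=8)(img))              # padded to 100x100 internally, then cropped
print(out.shape)                                              # torch.Size([3, 84, 84])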

Issues in creating a PySimpleGUI or Tkinter graph GUI by reading a CSV file, cleaning it and plotting a graph (histogram + PDF)

I want to create a GUI which should automatically clean the data in a CSV file once selected and plot a superimposed PDF & histogram graph. I have uploaded the basic Python program which generates the required graph, but I am unable to convert it into an interface. I guess only "open file" & "plot" buttons would cover the requirement. (Image: I want to retrieve data from the Nth column (13) only, skipping the top 4 rows.)
I am basically from a metallurgy background and trying my hand in this field.
Any help would be greatly appreciated.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
raw_data = pd.read_csv("D:/Project/Python/NDC/Outlier_ND/800016_DAT.csv",skiprows=4,header=None)
clean = pd.DataFrame(raw_data)
data1 = clean.iloc[:, [13]]
Q1 = data1.quantile(0.25)
Q3 = data1.quantile(0.75)
IQR = Q3 - Q1
data_IQR = data1[~((data1 < (Q1 - 1.5 * IQR)) |(data1 > (Q3 + 1.5 * IQR))).any(axis=1)]
data_IQR.shape
print(data1.shape)
print(data_IQR.shape)
headerList = ['Actual_MR']
data_IQR.to_csv(r'D:\Project\Python\NDC\Outlier_ND\800016_DAT_IQR.csv', header=headerList, index=False)
data = pd.read_csv("D:/Project/Python/NDC/Outlier_ND/800016_DAT_IQR.csv")
mean, sd = norm.fit(data)
plt.hist(data, bins=25, density=True, alpha=0.6, facecolor = '#2ab0ff', edgecolor='#169acf', linewidth=0.5)
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mean, sd)
plt.plot(x, p, 'red', linewidth=2)
title = " Graph \n mean: {:.2f} and SD: {:.2f}".format(mean, sd)
plt.title(title)
plt.xlabel('MR')
plt.ylabel('Pr')
plt.show()
The following code demonstrates how PySimpleGUI works with matplotlib; for details, please see the remarks in the script.
import math, random
from pathlib import Path
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import PySimpleGUI as sg
# 1. Define the class as the interface between matplotlib and PySimpleGUI
class Canvas(FigureCanvasTkAgg):
    """
    Create a canvas for matplotlib pyplot under tkinter/PySimpleGUI canvas
    """
    def __init__(self, figure=None, master=None):
        super().__init__(figure=figure, master=master)
        self.canvas = self.get_tk_widget()
        self.canvas.pack(side='top', fill='both', expand=1)
# 2. create PySimpleGUI window, a fixed-size Frame with Canvas which expand in both x and y.
font = ("Courier New", 11)
sg.theme("DarkBlue3")
sg.set_options(font=font)
layout = [
    [sg.Input(expand_x=True, key='Path'),
     sg.FileBrowse(file_types=(("ALL CSV Files", "*.csv"), ("ALL Files", "*.*"))),
     sg.Button('Plot')],
    [sg.Frame("", [[sg.Canvas(background_color='green', expand_x=True, expand_y=True, key='Canvas')]], size=(640, 480))],
    [sg.Push(), sg.Button('Exit')]
]
window = sg.Window('Matplotlib', layout, finalize=True)
# 3. Create a matplotlib canvas under sg.Canvas or sg.Graph
fig = Figure(figsize=(5, 4), dpi=100)
ax = fig.add_subplot()
canvas = Canvas(fig, window['Canvas'].Widget)
# 4. initial for figure
ax.set_title(f"Sensor Data")
ax.set_xlabel("X axis")
ax.set_ylabel("Y axis")
ax.set_xlim(0, 1079)
ax.set_ylim(-1.1, 1.1)
ax.grid()
canvas.draw() # do Update to GUI canvas
# 5. PySimpleGUI event loop
while True:
    event, values = window.read()
    if event in (sg.WINDOW_CLOSED, 'Exit'):
        break
    elif event == 'Plot':
        """
        path = values['Path']
        if not Path(path).is_file():
            continue
        """
        # 6. Get data from path and plot from here
        ax.cla()  # Clear axes first if required
        ax.set_title(f"Sensor Data")
        ax.set_xlabel("X axis")
        ax.set_ylabel("Y axis")
        ax.grid()
        theta = random.randint(0, 359)
        x = [degree for degree in range(1080)]
        y = [math.sin((degree+theta)/180*math.pi) for degree in range(1080)]
        ax.plot(x, y)
        canvas.draw()  # do Update to GUI canvas

# 7. Close window to exit
window.close()
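To tie this back to the original script, the 'Plot' branch could read the selected CSV, apply the IQR filter and draw the histogram plus the fitted PDF on ax instead of the sine demo. An untested sketch (it assumes the same column index 13 and the 4 skipped rows from the question, and reuses ax, canvas and values['Path'] from the demo above):
import numpy as np
import pandas as pd
from scipy.stats import norm

# Inside the `elif event == 'Plot':` branch, replacing the sine demo:
raw = pd.read_csv(values['Path'], skiprows=4, header=None)
data1 = raw.iloc[:, [13]]
Q1, Q3 = data1.quantile(0.25), data1.quantile(0.75)
IQR = Q3 - Q1
data_iqr = data1[~((data1 < (Q1 - 1.5 * IQR)) | (data1 > (Q3 + 1.5 * IQR))).any(axis=1)]

values_1d = data_iqr.values.ravel()
mean, sd = norm.fit(values_1d)
ax.cla()
ax.hist(values_1d, bins=25, density=True, alpha=0.6,
        facecolor='#2ab0ff', edgecolor='#169acf', linewidth=0.5)
xmin, xmax = ax.get_xlim()
x = np.linspace(xmin, xmax, 100)
ax.plot(x, norm.pdf(x, mean, sd), 'red', linewidth=2)
ax.set_title("Graph\nmean: {:.2f} and SD: {:.2f}".format(mean, sd))
ax.set_xlabel('MR')
ax.set_ylabel('Pr')
canvas.draw()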

Output of a callback as input of another callback, second callback runs twice

The dashboard includes a dropdown, a table and a graph.
By selecting an item from the dropdown, some rows of the table are shown and the figure plots the data in the table.
I have two callbacks.
The first one:
input: dropdown
output: table
The second one:
input: table
output: figure
So the output of the first callback is the input of the second one.
The output of the following code looks like this:
import dash
import dash_table
import pandas as pd
from copy import deepcopy
import plotly.graph_objects as go
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
data = pd.DataFrame({"a": ["a1", "a2"],
                     "b": [1, 2]})
app = dash.Dash(
    __name__,
    external_stylesheets=[dbc.themes.BOOTSTRAP],
    prevent_initial_callbacks=True,
)
app.layout = dbc.Container(
    [
        dbc.Row(dbc.Col([dcc.Dropdown(
            id='dropdown1',
            options=[
                {'label': '1', 'value': 1},
                {'label': '2', 'value': 2}
            ],
            value=1)])),
        dbc.Row(
            dbc.Col([
                dash_table.DataTable(
                    id="datatable",
                    columns=[dict(id=i,
                                  name=j,
                                  )
                             for i, j in zip(
                                 ["a", "b"],
                                 ["a", "b"],
                             )],
                    data=data.loc[data['a'] ==
                                  "a1"].to_dict("records"),
                    sort_mode="single",
                )
            ])),
        dbc.Row([dbc.Col([html.Div(
            children=dcc.Graph(id="graph"), className="card")
        ])]),
    ], fluid=True,
)
########################################################
@app.callback(
    Output("datatable", "data"),
    [Input("dropdown1", "value")],
    prevent_initial_call=True
)
def update_current_table(value):
    idx = int(value)
    print(idx)
    if idx == 1:
        df = (data.loc[data["a"] == "a1"])
        return df.to_dict("records")
    else:
        df = deepcopy(data.loc[data["a"] == "a2"])
        return df.to_dict("records")
########################################################
@app.callback(
    Output('graph', 'figure'),
    [Input("datatable", "derived_virtual_data"),
     Input("dropdown1", "value")
     ],
    prevent_initial_call=True
)
def update_figure(table, value):
    df = pd.DataFrame(table)
    print(df)
    fig = go.Figure()
    return fig

if __name__ == "__main__":
    app.run_server(debug=True, port=8000)
The problem here is that the callback runs twice:
a b
0 a1 1
a b
0 a2 2
and the first time it prints the previous values of the table.
This causes an error in the main code that I am working on.
How can I prevent Dash from running the callback twice?
I found some other questions complaining about the callback running twice, but I could not find a proper solution.
The problem is that you have two inputs for the callback that's running twice:
@app.callback(
    Output('graph', 'figure'),
    [Input("datatable", "derived_virtual_data"),
     Input("dropdown1", "value")
     ],
    prevent_initial_call=True
)
def update_figure(table, value):
The dropdown changes, and triggers this callback, but it also triggers the table to update. The updated table causes derived_virtual_data to trigger this callback again. You can fix this by making the dropdown value a State instead, like this:
@app.callback(
    Output('graph', 'figure'),
    [Input("datatable", "derived_virtual_data")],
    [State("dropdown1", "value")],
    prevent_initial_call=True
)
def update_figure(table, value):
Edit:
On another read-through: you don't even use the value from the dropdown in the second callback. Just removing it entirely will also work:
@app.callback(
    Output('graph', 'figure'),
    [Input("datatable", "derived_virtual_data")],
    prevent_initial_call=True
)
def update_figure(table):
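For completeness, a sketch of how the full second callback would then look (the body is copied unchanged from the question):
@app.callback(
    Output('graph', 'figure'),
    [Input("datatable", "derived_virtual_data")],
    prevent_initial_call=True
)
def update_figure(table):
    # Only the table's derived data triggers this callback now, so it runs once
    # per dropdown change (after the table has been updated).
    df = pd.DataFrame(table)
    print(df)
    fig = go.Figure()
    return fig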

Dash Plotly: Highlighting point on the graph

I was googling around trying to find a solution for the following question (no luck so far). I am using a Plotly Dash callback in order to build a graph:
@app.callback(
    Output("graph", "figure"),
    [Input("some-input", "value")],
    [State("some-state", "value")])
def build_graph(input_value, state_value):
    # computing data for graph_figure
    return graph_figure
Now, I want to have another callback which will highlight a specific point on the graph (or add/remove a point) based on some input condition. I struggle to figure out what to use as the Output in this case, because I cannot output graph.figure again (Dash does not allow outputs to the same component from different callbacks), and re-drawing the entire graph seems inefficient.
I will appreciate any suggestions.
It is possible to change the layout of your graph without redrawing the whole graph by using this library: https://github.com/jimmybow/mydcc
There is an example of how to use it here: https://github.com/jimmybow/mydcc#3-mydccrelayout-
I have prepared a small example that adds an annotation on button click. Don't forget to pip install mydcc beforehand. I had to add a cache, in the form of an invisible div, to preserve the old annotations when adding a new one.
import dash
import dash_core_components as dcc
import dash_html_components as html
import mydcc
import random
import json
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
initial_layout = {'title': 'Dash Data Visualization'}
app.layout = html.Div(children=[
    html.H1(children='Hello Dash'),
    html.Div(children='''
        Dash: A web application framework for Python.
    '''),
    dcc.Graph(
        id='example-graph',
        figure={
            'data': [{'x': [1, 2, 3], 'y': [4, 1, 2], 'name': 'Test'}],
            'layout': initial_layout
        }
    ),
    mydcc.Relayout(id="rrr", aim='example-graph'),
    html.Button('Add random annotation', id='button'),
    html.Div(id='user-cache', style={'display': 'none'},
             children=json.dumps(initial_layout)),
])
@app.callback(
    [dash.dependencies.Output('rrr', 'layout'),
     dash.dependencies.Output('user-cache', 'children')],
    [dash.dependencies.Input('button', 'n_clicks')],
    [dash.dependencies.State('user-cache', 'children')])
def update_graph_annotations(n_clicks, layout):
    if n_clicks is not None:
        layout = json.loads(layout)
        if 'annotations' not in layout:
            layout['annotations'] = []
        layout['annotations'].append(dict(
            x=random.uniform(0, 1) * 2 + 1,
            y=random.uniform(0, 1) * 2 + 1,
            xref="x",
            yref="y",
            text="Annotation" + str(n_clicks),
            showarrow=True
        ))
        return layout, json.dumps(layout)
    return dash.no_update, dash.no_update

if __name__ == '__main__':
    app.run_server(debug=True)

putting HTML output from SHAP into the Dash output layout callback

I am trying to make a dashboard where the output from a SHAP force plot is illustrated. shap.force_plot is HTML decorated with JSON. The example is here.
I made a very simple dashboard using the tutorial which should plot the desired figure after clicking submit.
Here is the code:
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import pandas as pd
from sqlalchemy import create_engine
import shap
from sources import *
import xgboost
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
    dcc.Input(id='input-cvr-state', type='text', value='12'),
    html.Button(id='submit-button', n_clicks=0, children='Submit'),
    html.Div(id='output-state'),
    html.Div(id='output-shap')
])
@app.callback(Output('output-shap', 'children'),
              [Input('submit-button', 'n_clicks')],
              [State('input-cvr-state', 'value')])
def update_shap_figure(n_clicks, input_cvr):
    shap.initjs()
    # train XGBoost model
    X, y = shap.datasets.boston()
    model = xgboost.train({"learning_rate": 0.01}, xgboost.DMatrix(X, label=y), 100)
    # explain the model's predictions using SHAP values (same syntax works for LightGBM, CatBoost, and scikit-learn models)
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X)
    # visualize the first prediction's explanation
    return shap.force_plot(explainer.expected_value, shap_values[0, :], X.iloc[0, :])  # matplotlib=True

if __name__ == '__main__':
    app.run_server(debug=True)
I managed it with the following steps:
import shap
from shap.plots._force_matplotlib import draw_additive_plot

# ... class dashApp
# ... callback as method

# matplotlib=False => returns an AdditiveForceVisualizer;
# if set to True, the visualizer renders the result to stdout directly.
# x is the index of the wanted input
# class_1 is my class to draw
force_plot = shap.force_plot(
    self.explainer.expected_value[class_1],
    self.shap_values[class_1][x[0], :],
    self.data.iloc[x, :].drop(columns=["TARGET"], errors="ignore"),
    matplotlib=False
)
# set show=False to force the figure to be returned
force_plot_mpl = draw_additive_plot(force_plot.data, (30, 7), show=False)
return figure_to_html_img(force_plot_mpl)


def figure_to_html_img(figure):
    """ figure to html base64 png image """
    try:
        tmpfile = io.BytesIO()
        figure.savefig(tmpfile, format='png')
        encoded = base64.b64encode(tmpfile.getvalue()).decode('utf-8')
        shap_html = html.Img(src=f"data:image/png;base64, {encoded}")
        return shap_html
    except AttributeError:
        return ""
The result will look like this:
An alternative is to use html.IFrame which will produce a better looking and fully interactive plot.
Here's an example that can be used directly as an Output
def _force_plot_html(*args):
    force_plot = shap.force_plot(*args, matplotlib=False)
    shap_html = f"<head>{shap.getjs()}</head><body>{force_plot.html()}</body>"
    return html.Iframe(srcDoc=shap_html,
                       style={"width": "100%", "height": "200px", "border": 0})
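For context, a possible way to wire this helper into the question's callback (untested sketch; the model-fitting lines are copied from the question and _force_plot_html is the helper above):
@app.callback(Output('output-shap', 'children'),
              [Input('submit-button', 'n_clicks')],
              [State('input-cvr-state', 'value')])
def update_shap_figure(n_clicks, input_cvr):
    X, y = shap.datasets.boston()
    model = xgboost.train({"learning_rate": 0.01}, xgboost.DMatrix(X, label=y), 100)
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X)
    # Return an html.Iframe instead of the raw force-plot object.
    return _force_plot_html(explainer.expected_value, shap_values[0, :], X.iloc[0, :])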