Has anyone successfully managed running a Dash app using asyncio for Python?
Any examples greatly appreciated.
I have been digging deep into the event loop, co-routines and tasks in asyncio.
Essentially I want to do this:
main_loop()
1. ASYNC - Collect data from web (async call / avoid blocking)
2. ASYNC - Process data (when data is collected from task1)
3. ASYNC - Update data (when data processed from task2)
4. Display data using Dash (this should run constantly, regardless of the status of tasks 1, 2 and 3)
So - how do I communicate between tasks, and run the display (Dash) separately?
Ideas:
- 2 event loops
- Two threads?
Issues:
- How to communicate between tasks
- How to run the Dash app the whole time while the event loop above also repeats forever / until interrupted (i.e. getData, ProcessData, UpdateDashDisplay...)
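Something like this is the shape I have in mind for the task-to-task communication, a minimal sketch assuming asyncio.Queue hand-offs (fetch_data and transform are placeholders for my real steps):

import asyncio

async def collect(raw: asyncio.Queue):
    while True:
        data = await fetch_data()              # placeholder async web call (task 1)
        await raw.put(data)                    # hand the result to the processor

async def process(raw: asyncio.Queue, processed: asyncio.Queue):
    while True:
        item = await raw.get()                 # waits cooperatively until task 1 delivers
        await processed.put(transform(item))   # placeholder processing step (tasks 2/3)

async def main():
    raw, processed = asyncio.Queue(), asyncio.Queue()
    await asyncio.gather(collect(raw), process(raw, processed))

The open part is then how Dash reads from the processed queue while its own server keeps running.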
I have faced a similar requirement where I wanted to run Dash and its callbacks asynchronously, so I created a library called async-dash. With it you can use truly asynchronous callbacks as well as WebSockets, SSE, etc.
Example:
import asyncio
from async_dash import Dash
from dash import html, Output, Input, dcc, callback

app = Dash(__name__)
app.layout = ...

@callback(...)
async def create_dashboard(...):
    await fetch_data()
    result = await process_data()
    return result

if __name__ == "__main__":
    app.run_server()
PS: Don't use it for production (just yet!).
Dash App in Async ->
Please refer to:
Asyncio run Dash (Flask) server with another coroutine concurrently
from flask import Flask, jsonify
import asyncio
from threading import Thread
# Dash
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import requests
import plotly.graph_objects as go
# ** Async Part **
async def some_print_task():
    """Some async function"""
    while True:
        await asyncio.sleep(2)
        print("Some Task")

async def another_task():
    """Another async function"""
    while True:
        await asyncio.sleep(3)
        print("Another Task")

async def async_main():
    """Main async function"""
    await asyncio.gather(some_print_task(), another_task())

def async_main_wrapper():
    """Non-async wrapper around async_main to run it as the target function of a Thread"""
    asyncio.run(async_main())
# *** Dash Part ***:
app = dash.Dash()
app.layout = html.Div([
    # html.Div([
    #     html.Iframe(src="https://www.flightradar24.com",
    #                 height=500, width=200)
    # ]),
    html.Div([
        html.Pre(id='counter-text', children='Active Flights Worldwide'),
        dcc.Graph(id='live-update-graph', style={'width': 1200}),
        dcc.Interval(id='interval-component',
                     interval=6000,
                     n_intervals=0)
    ])
])
counter_list = []
@app.callback(Output('counter-text', 'children'),
              [Input('interval-component', 'n_intervals')])
def update_layout(n):
    url = "https://data-live.flightradar24.com/zones/fcgi/feed.js?faa=1&mlat=1&flarm=1&adsb=1&gnd=1&air=1&vehicles=1&estimated=1&stats=1"
    res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})  # A fake header is necessary to access the site
    data = res.json()
    counter = 0
    for element in data["stats"]["total"]:
        counter += data["stats"]["total"][element]
    counter_list.append(counter)
    return "Active flights Worldwide: {}".format(counter)
@app.callback(Output('live-update-graph', 'figure'),
              [Input('interval-component', 'n_intervals')])
def update_graph(n):
    fig = go.Figure(data=[
        go.Scatter(x=list(range(len(counter_list))),
                   y=counter_list,
                   mode='lines+markers')
    ])
    return fig
if __name__ == '__main__':
    # run all async stuff in another thread
    th = Thread(target=async_main_wrapper)
    th.start()
    app.run_server(debug=True)
    th.join()
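Note that in this example the async tasks only print; to feed their results into Dash, they could write to shared module-level state (like counter_list above) that the interval callback reads, or, more robustly, a thread-safe queue.Queue as the hand-off between the background thread and the server.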
This may work. Just create an async function for the data collection, processing, and updating, and another async function for the Dash code. Then run it like below:
async def web_data():
    # web data code goes here
    ...

async def dash_code():
    # Dash code goes here
    ...

async def main():
    await asyncio.gather(web_data(), dash_code())

asyncio.run(main())
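One caveat: app.run_server() is a blocking call, so written literally, dash_code() would stall the event loop and web_data() would never run. A sketch of one workaround, assuming app is your Dash instance, is to push the blocking server onto a worker thread with run_in_executor:

async def dash_code():
    loop = asyncio.get_running_loop()
    # run_server() blocks; running it in an executor thread
    # keeps the event loop free to drive web_data()
    await loop.run_in_executor(None, app.run_server)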
Related
I am using the code below, as suggested in https://docs.ray.io/en/master/serve/getting_started.html, for my 2 GPUs.
from starlette.requests import Request
import ray
from ray import serve
from transformers import pipeline
from parallel import *
@serve.deployment(num_replicas=2, ray_actor_options={"num_cpus": 0, "num_gpus": 1})
class Translator:
    def __init__(self):
        self.model = get_model()  # pipeline("translation_en_to_fr", model="t5-small")

    def translate(self, count: int) -> int:
        model_output = predict(self.model, count)  # self.model(text)
        return 'translation'

    async def __call__(self, http_request: Request) -> str:
        count: str = await http_request.json()
        return self.translate(count)

translator = Translator.bind()
I have another file which loads the model and predicts.
This is how the model is loaded:
def get_model():
    model = LayoutLMv2ForQuestionAnswering.from_pretrained(model_checkpoint_finetuned)
    print('model loaded in device')
    return model
I don't see any GPUs being used while predicting; it just uses the CPU.
Can anyone help here?
I believe you need to make sure the model is moved onto the device (i.e. via model.to("cuda")).
https://huggingface.co/docs/transformers/perf_train_gpu_one
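For example, a minimal adjustment to the get_model() above (an assumption on my part that the replica can see a CUDA device; the inputs passed to predict would need the same .to("cuda") treatment):

import torch

def get_model():
    model = LayoutLMv2ForQuestionAnswering.from_pretrained(model_checkpoint_finetuned)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)  # move the weights onto the GPU when one is visible
    print(f'model loaded on {device}')
    return model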
I'm looking for the simplest way to generate the same Dash dashboard X times with different datasets.
Each dashboard is a single page, and the main app is a Flask app.
The goal is to have a dashboard template running on different Dash instances with different datasets.
I started with the following code, but I'm struggling when Dash pages include callbacks, i.e. when html.Div([html.H1('Hi there, I am app1 for reports')]) turns into a bigger function with callbacks.
import dash
import dash_html_components as html
from flask import Flask, render_template, redirect
from werkzeug.middleware.dispatcher import DispatcherMiddleware
app = Flask(__name__)

@app.route('/')
def hello_world():
    return 'Hello from Flask!'

@app.route('/test2')
def t2():
    return render_template('test2.html')

dash_app1 = dash.Dash(__name__, server=app, url_base_pathname='/dashboard/')
dash_app2 = dash.Dash(__name__, server=app, url_base_pathname='/reports/')
dash_app1.layout = html.Div([html.H1('Hi there, I am app1 for reports')])
dash_app2.layout = html.Div([html.H1('Hi there, I am app2 for reports')])

@app.route('/dashboard')
def render_dashboard():
    return redirect('/dash1')

@app.route('/reports')
def render_reports():
    return redirect('/dash2')

app = DispatcherMiddleware(app, {
    '/dash1': dash_app1.server,
    '/dash2': dash_app2.server
})
So my question is: what is the best way/architecture to manage multiple Dash dashboards, based on the same template, running on different data?
In case it might help, I found a solution encapsulating the layout, callbacks and return function into a class.
flask_app.py
from flask import Flask, render_template, redirect
import dash
from apps.dashboard1 import Dashboard1
from apps.dashboard2 import Dashboard2
app = Flask(__name__)

@app.route('/')
def hello_world():
    return 'Hello from Flask!'

@app.route('/test')
def test():
    return render_template('test.html')

dash_app1 = dash.Dash(__name__, server=app, url_base_pathname='/dashboard1/')
dash_app1.config.suppress_callback_exceptions = True
dash_app1.layout = Dashboard1(dash_app1).layout

@app.route('/dashboard1')
def render_dashboard():
    return redirect('/dash1')

dash_app2 = dash.Dash(__name__, server=app, url_base_pathname='/dashboard2/')
dash_app2.config.suppress_callback_exceptions = True
dash_app2.layout = Dashboard2(dash_app2).layout

@app.route('/dashboard2')
def render_dashboard2():
    return redirect('/dash2')

if __name__ == '__main__':
    app.run(debug=True)
apps/dashboard1.py
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
class Dashboard1:
    def __init__(self, app_dependency):
        self.app_dependency = app_dependency
        self.layout = html.Div([
            html.P("Your code here"),
            dcc.Dropdown(...),
            ...
            dcc.Graph(id="pie-chart"),
        ])

        @self.app_dependency.callback(
            Output("pie-chart", "figure"),
            Input('organisations', 'value'))
        def update_output_div(selected_org):
            your_update_function
            return your_outputs
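To get the same template onto different datasets, one option (a sketch; TemplateDashboard, df_reports and df_dashboard are hypothetical names) is to pass the data into the constructor alongside the app, so each Dash instance owns its own copy:

class TemplateDashboard:
    def __init__(self, app_dependency, data):
        self.app_dependency = app_dependency
        self.data = data  # each instance owns its dataset
        # build self.layout and register callbacks from self.data, as above

dash_app1.layout = TemplateDashboard(dash_app1, df_reports).layout
dash_app2.layout = TemplateDashboard(dash_app2, df_dashboard).layout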
I am trying to make a dashboard that illustrates the output from a SHAP force plot. shap.force_plot returns HTML decorated with JSON. The example is here.
I made a very simple dashboard using the tutorial, which should plot the desired figure after clicking Submit.
Here is the code:
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import pandas as pd
from sqlalchemy import create_engine
import shap
from sources import *
import xgboost
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
    dcc.Input(id='input-cvr-state', type='text', value='12'),
    html.Button(id='submit-button', n_clicks=0, children='Submit'),
    html.Div(id='output-state'),
    html.Div(id='output-shap')
])

@app.callback(Output('output-shap', 'children'),
              [Input('submit-button', 'n_clicks')],
              [State('input-cvr-state', 'value')])
def update_shap_figure(n_clicks, input_cvr):
    shap.initjs()
    # train XGBoost model
    X, y = shap.datasets.boston()
    model = xgboost.train({"learning_rate": 0.01}, xgboost.DMatrix(X, label=y), 100)
    # explain the model's predictions using SHAP values
    # (same syntax works for LightGBM, CatBoost, and scikit-learn models)
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X)
    # visualize the first prediction's explanation
    return shap.force_plot(explainer.expected_value, shap_values[0, :], X.iloc[0, :])  # matplotlib=True

if __name__ == '__main__':
    app.run_server(debug=True)
I managed it with the following steps:
import shap
from shap.plots._force_matplotlib import draw_additive_plot

# ... class dashApp
# ... callback as method

# matplotlib=False => returns an additive force visualizer;
# if set to True, the visualizer renders the result to stdout directly
# x is the index of the wanted input
# class_1 is my class to draw
force_plot = shap.force_plot(
    self.explainer.expected_value[class_1],
    self.shap_values[class_1][x[0], :],
    self.data.iloc[x, :].drop(columns=["TARGET"], errors="ignore"),
    matplotlib=False
)
# set show=False to force the figure to be returned
force_plot_mpl = draw_additive_plot(force_plot.data, (30, 7), show=False)
return figure_to_html_img(force_plot_mpl)
import base64
import io

import dash_html_components as html

def figure_to_html_img(figure):
    """figure to html base64 png image"""
    try:
        tmpfile = io.BytesIO()
        figure.savefig(tmpfile, format='png')
        encoded = base64.b64encode(tmpfile.getvalue()).decode('utf-8')
        shap_html = html.Img(src=f"data:image/png;base64, {encoded}")
        return shap_html
    except AttributeError:
        return ""
The result will be a static PNG rendering of the force plot embedded in the page.
An alternative is to use html.Iframe, which produces a better-looking and fully interactive plot.
Here's an example that can be returned directly as a callback Output:
def _force_plot_html(*args):
    force_plot = shap.force_plot(*args, matplotlib=False)
    shap_html = f"<head>{shap.getjs()}</head><body>{force_plot.html()}</body>"
    return html.Iframe(srcDoc=shap_html,
                       style={"width": "100%", "height": "200px", "border": 0})
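For instance, wired into a callback shaped like the one in the question (a sketch; explainer, shap_values and X as computed there):

@app.callback(Output('output-shap', 'children'),
              [Input('submit-button', 'n_clicks')],
              [State('input-cvr-state', 'value')])
def update_shap_figure(n_clicks, input_cvr):
    return _force_plot_html(explainer.expected_value, shap_values[0, :], X.iloc[0, :])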
I want to create a REST API without using Flask. I have created one using Flask, as shown below, but now I want to try without Flask. I came to know that urllib is one of the packages for doing it, but I'm not sure how. Even if there is some way other than urllib, that is also fine.
from werkzeug.wrappers import Request, Response
import json
from flask import Flask, request, jsonify
app = Flask(__name__)

with open("jsonfile.json") as f:
    data = json.load(f)
    # data = f.read()

@app.route('/', methods=['GET', 'POST'])
def hello():
    return jsonify(data)

if __name__ == '__main__':
    from werkzeug.serving import run_simple
    run_simple('localhost', 9000, app)
You can try something like this:
import json
import http.server
import socketserver
from typing import Tuple
from http import HTTPStatus
class Handler(http.server.SimpleHTTPRequestHandler):
    def __init__(self, request: bytes, client_address: Tuple[str, int], server: socketserver.BaseServer):
        super().__init__(request, client_address, server)

    @property
    def api_response(self):
        return json.dumps({"message": "Hello world"}).encode()

    def do_GET(self):
        if self.path == '/':
            self.send_response(HTTPStatus.OK)
            self.send_header("Content-Type", "application/json")
            self.end_headers()
            self.wfile.write(bytes(self.api_response))

if __name__ == "__main__":
    PORT = 8000
    # Create an object of the above class
    my_server = socketserver.TCPServer(("0.0.0.0", PORT), Handler)
    # Start the server
    print(f"Server started at {PORT}")
    my_server.serve_forever()
And testing like this
→ curl http://localhost:8000
{"message": "Hello world"}%
But keep in mind that this code is not production-ready; it is just a sample.
You can take an existing web server and plug in a WSGI-compatible app. For example, for Apache HTTP Server 2.2:
1. Install mod_wsgi (just search how to install mod_wsgi in your operating system)
2. Configure mod_wsgi in Apache httpd.conf:
LoadModule wsgi_module modules/mod_wsgi.so
WSGIScriptAlias /wsgi /var/www/wsgi/myapp.wsgi
3. Write myapp.wsgi
The code in myapp.wsgi must call the second argument (start_response) exactly once, in this way:
def application(environ, start_response):
    status = '200 OK'
    output = b'{"message": "Hello world"}'
    response_headers = [('Content-type', 'application/json'),
                        ('Content-Length', str(len(output)))]
    start_response(status, response_headers)
    return [output]
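Assuming the WSGIScriptAlias above, the endpoint should then answer on /wsgi and can be tested the same way:
→ curl http://localhost/wsgi
{"message": "Hello world"}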
I'm making a pipeline in Scrapy to store scraped data in a MySQL database. When the spider is run in the terminal it works perfectly; even the pipeline is opened. However, the data is not being sent to the database. Any help appreciated! :)
here's the pipeline code:
import sys
import MySQLdb
import hashlib
from scrapy.exceptions import DropItem
from scrapy.http import Request
from tutorial.items import TutorialItem
class MySQLTest(object):
    def __init__(self):
        # keep the connection and cursor on self so process_item can reach them
        self.db = MySQLdb.connect(user='root', passwd='', host='localhost', db='python')
        self.cursor = self.db.cursor()

    def process_item(self, item, spider):  # Scrapy passes (item, spider), in that order
        try:
            self.cursor.execute("INSERT INTO info (venue, datez) VALUES (%s, %s)",
                                (item['artist'], item['date']))
            self.db.commit()
        except MySQLdb.Error as e:
            print("Error %d: %s" % (e.args[0], e.args[1]))
        return item
and here's the spider code:
import scrapy # Import required libraries.
from scrapy.selector import HtmlXPathSelector # Allows for path detection in a websites code.
from scrapy.spider import BaseSpider # Used to create a simple spider to extract data.
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor # Needed for the extraction of href links in HTML to crawl further pages.
from scrapy.contrib.spiders import CrawlSpider # Needed to make the crawl spider.
from scrapy.contrib.spiders import Rule # Allows specified rules to affect which links are crawled.
import spotipy
import soundcloud
import mysql.connector
from tutorial.items import TutorialItem
class AllGigsSpider(CrawlSpider):
    name = "allGigs"  # Name of the Spider. In the command prompt, when in the correct folder, enter "scrapy crawl allGigs".
    allowed_domains = ["www.allgigs.co.uk"]  # Allowed domains is a String NOT a URL.
    start_urls = [
        "http://www.allgigs.co.uk/whats_on/London/clubbing-1.html",
        "http://www.allgigs.co.uk/whats_on/London/festivals-1.html",
        "http://www.allgigs.co.uk/whats_on/London/comedy-1.html",
        "http://www.allgigs.co.uk/whats_on/London/theatre_and_opera-1.html",
        "http://www.allgigs.co.uk/whats_on/London/dance_and_ballet-1.html"
    ]  # Specify the starting points for the web crawler.
    rules = [
        Rule(SgmlLinkExtractor(restrict_xpaths='//div[@class="more"]'),  # Search the start URLs for links to follow.
             callback="parse_me",
             follow=True),
    ]

    def parse_me(self, response):
        for info in response.xpath('//div[@class="entry vevent"]|//div[@class="resultbox"]'):
            item = TutorialItem()  # Extract items from the items folder.
            item['artist'] = info.xpath('.//span[@class="summary"]//text()').extract()  # Extract artist information.
            item['date'] = info.xpath('.//span[@class="dates"]//text()').extract()  # Extract date information.
            # item['endDate'] = info.xpath('.//abbr[@class="dtend"]//text()').extract()  # Extract end date information.
            # item['startDate'] = info.xpath('.//abbr[@class="dtstart"]//text()').extract()  # Extract start date information.
            item['genre'] = info.xpath('.//div[@class="header"]//text()').extract()
            yield item  # Yield the extracted item.
            client = soundcloud.Client(client_id='401c04a7271e93baee8633483510e263')
            tracks = client.get('/tracks', limit=1, license='cc-by-sa', q=item['artist'])
            for track in tracks:
                print(tracks)
I believe the problem was in my settings.py file, where I had missed a comma... yawn.
ITEM_PIPELINES = {
    'tutorial.pipelines.MySQLTest': 300,
}