Plotly dashboard hangs on loading - likely bug - plotly-dash

There is likely a bug in the following code that causes the dashboard not to load, but I don't see where it is:
from dash import Dash, html, dcc, Input, Output, State
import plotly.express as px
import plotly.graph_objects as go
import dash_bootstrap_components as dbc
import pandas as pd
from pandas_datareader import data
import yfinance as yf
yf.pdr_override()
from datetime import date

start = pd.to_datetime('2022-01-01')
end = pd.to_datetime(date.today())

def update_data():
    # !! reset_index because otherwise plotly doesn't recognize the index as an x input in go.Figure
    df = data.DataReader('USDJPY%3DX', data_source='yahoo', start=start, end=end).reset_index()
    return df

app = Dash(__name__, external_stylesheets=[dbc.themes.LITERA])

app.layout = dbc.Container(
    [
        dbc.Row(
            [
                dbc.Col(
                    [
                        html.H1("Daily Price", style={"textAlign": "center"}),
                        dcc.Graph(id="price-chart", figure={}),
                    ],
                    width=12, lg=6),
                dbc.Col(
                    [
                        html.H1("10 Day SMA of Daily Range", style={"textAlign": "center"}),
                        dcc.Graph(id="volatility-chart", figure={}),
                    ],
                    width=12, lg=6),
            ]
        ),
        dbc.Row(
            dbc.Col(
                dcc.Dropdown(
                    id="dropdown",
                    options=["AAPL", "TSLA", "MSFT"],
                    value=["TSLA"],
                    style={"color": "green"},
                ),
                className="three columns"),
        ),
        dcc.Store(id="storage", storage_type="memory", data={}),
        dcc.Interval(id="timer", interval=1000 * 60, n_intervals=0),
    ]
)

@app.callback(Output(component_id="storage", component_property="data"),
              Input(component_id="timer", component_property="n_intervals"))
def store_data(n_time):
    df = update_data()
    return df.to_dict("records")

@app.callback(Output(component_id="price-chart", component_property="figure"),
              Input(component_id="storage", component_property="data"))
def display_data(stored_dataframe):
    df = pd.DataFrame.from_records(stored_dataframe)
    fig = go.Figure(data=[go.Candlestick(x=df['Date'],
                                         open=df['Open'],
                                         high=df['High'],
                                         low=df['Low'],
                                         close=df['Close'])])
    return fig

@app.callback(Output(component_id="volatility-chart", component_property="figure"),
              Input(component_id="storage", component_property="data"))
def modify_data(stored_dataframe):
    df = pd.DataFrame.from_records(stored_dataframe)
    df['range'] = df.High - df.Low
    df['range_sma'] = df.range.rolling(10).mean()
    fig = px.line(df.range_sma)
    return fig

if __name__ == "__main__":
    app.run_server(debug=True)

I would prefer creating the app with dash.Dash and would also choose an explicit port:
if __name__ == "__main__":
    app.run_server(debug=True, port=8050)
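One thing worth ruling out first: if the Yahoo Finance request hangs or raises, store_data never returns and the page stays on its loading screen. A minimal sketch (assuming yfinance is installed; the ticker and start date mirror the question) to exercise the data fetch outside Dash:

# Quick standalone check of the data fetch used by update_data().
# Assumes yfinance is installed; ticker and start date mirror the question.
import pandas as pd
import yfinance as yf

start = pd.to_datetime('2022-01-01')

# If this call hangs or returns an empty frame, the store_data callback
# in the dashboard will stall in exactly the same way.
df = yf.download('USDJPY=X', start=start).reset_index()
print(df.tail())
print("rows:", len(df), "columns:", list(df.columns))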

Related

How to fill in missing column value?

# Import libraries
from bs4 import BeautifulSoup
import requests
import pandas as pd
import time
import ast

start_time = time.time()
s = requests.Session()

# Get URL and extract content
page = 1
traits = []
accessories, backgrounds, shoes = [], [], []
while page != 100:
    params = {
        ('arg', f"Qmer3VzaeFhb7c5uiwuHJbRuVCaUu72DcnSoUKb1EvnB2x/{page}"),
    }
    content = s.get('https://ipfs.infura.io:5001/api/v0/cat', params=params, auth=('', ''))
    soup = BeautifulSoup(content.text, 'html.parser')
    page = page + 1
    traits = ast.literal_eval(soup.text)['attributes']
    df = pd.DataFrame(traits)
    df1 = df[df['trait_type']=='ACCESSORIES']
    accessories.append(df1['value'].values[0])
When I run the above code I get the following error:
IndexError: index 0 is out of bounds for axis 0 with size 0
This happens because not every item has an "ACCESSORIES" trait data point. So how would I go about adding/filling in an ACCESSORIES trait for those items that don't have one with an empty, nan, or 0 value?
The following code solves this issue:
# Import libraries
from bs4 import BeautifulSoup
import requests
import pandas as pd
import time
import ast

start_time = time.time()
s = requests.Session()

# Get URL and extract content
page = 1
traits = []
accessories, backgrounds, shoes = [], [], []
while page != 100:
    params = {
        ('arg', f"Qmer3VzaeFhb7c5uiwuHJbRuVCaUu72DcnSoUKb1EvnB2x/{page}"),
    }
    content = s.get('https://ipfs.infura.io:5001/api/v0/cat', params=params, auth=('', ''))
    soup = BeautifulSoup(content.text, 'html.parser')
    page = page + 1
    traits = ast.literal_eval(soup.text)['attributes']
    df = pd.DataFrame(traits)
    df1 = df[df['trait_type']=='ACCESSORIES']
    try:
        accessories.append(df1['value'].values[0])
    except IndexError:
        # No ACCESSORIES trait for this item: record a placeholder instead
        accessories.append('NONE')
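An alternative (a sketch, not the posted answer; the trait_value helper is a name I made up) is to look each trait up with a default so no exception handling is needed:

# Hypothetical helper: map trait_type -> value once, then fetch with a default.
def trait_value(traits, trait_type, default='NONE'):
    lookup = {t['trait_type']: t['value'] for t in traits}
    return lookup.get(trait_type, default)

# Inside the loop, instead of indexing into the filtered DataFrame:
# accessories.append(trait_value(traits, 'ACCESSORIES'))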

Stream multiple videos using OpenCV Python Flask

I'm trying to stream 2 webcams at once using Flask and Python, but I'm not able to do so. When I run my code, both webcams light up, but only one of the cameras shows on the webpage and I'm not sure why.
Here is the code I'm using:
from flask import Flask, render_template, Response
from vCamera import VideoCamera
import pdb

app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

def gen(vCamera0):
    while True:
        frame0 = vCamera0.get_frame0()
        yield (b'--frame0\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame0 + b'\r\n\r\n')
        frame2 = vCamera0.get_frame2()
        yield (b'--frame2\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame2 + b'\r\n\r\n')

@app.route('/video_feed0')
def video_feed0():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame0')

@app.route('/video_feed2')
def video_feed2():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame2')

if __name__ == '__main__':
    app.run(host='127.0.0.1', debug=True)
And this is my camera file:
import pdb
import cv2

fullbody_cascade = cv2.CascadeClassifier('haarcascade_fullbody.xml')
upperbody_cascade = cv2.CascadeClassifier('haarcascade_upperbody.xml')

class VideoCamera(object):
    def __init__(self):
        self.video0 = cv2.VideoCapture(0)
        self.video2 = cv2.VideoCapture(2)

    def __del__(self):
        self.video0.release()

    def get_frame0(self):
        success0, frame0 = self.video0.read()
        gray0 = cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY)
        fullbody0 = fullbody_cascade.detectMultiScale(gray0)
        upperbody0 = upperbody_cascade.detectMultiScale(gray0)
        for (x, y, w, h) in fullbody0:
            cv2.rectangle(frame0, (x, y), (x+w, y+h), (255, 0, 0), 2)
        for (x, y, w, h) in upperbody0:
            cv2.rectangle(frame0, (x, y), (x+w, y+h), (255, 0, 0), 2)
        ret0, jpeg0 = cv2.imencode('.jpg', frame0)
        return jpeg0.tobytes()

    def get_frame2(self):
        success2, frame2 = self.video2.read()
        gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        fullbody2 = fullbody_cascade.detectMultiScale(gray2)
        upperbody2 = upperbody_cascade.detectMultiScale(gray2)
        for (x, y, w, h) in fullbody2:
            cv2.rectangle(frame2, (x, y), (x+w, y+h), (255, 0, 0), 2)
        for (x, y, w, h) in upperbody2:
            cv2.rectangle(frame2, (x, y), (x+w, y+h), (255, 0, 0), 2)
        ret2, jpeg2 = cv2.imencode('.jpg', frame2)
        return jpeg2.tobytes()
I am very new to Flask so I'm not quite sure what the issue with the code I have written is. Any advice would be helpful!
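One common pattern (a sketch, assuming each route should stream exactly one camera) is to give every feed its own generator whose multipart boundary matches the boundary declared in the route's mimetype, instead of interleaving both frames in a single generator:

# Sketch: one generator per camera, sharing a single VideoCamera instance
# so each device is only opened once.
from flask import Flask, render_template, Response
from vCamera import VideoCamera

app = Flask(__name__)
camera = VideoCamera()

def gen_camera0(cam):
    while True:
        frame = cam.get_frame0()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

def gen_camera2(cam):
    while True:
        frame = cam.get_frame2()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

@app.route('/video_feed0')
def video_feed0():
    return Response(gen_camera0(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/video_feed2')
def video_feed2():
    return Response(gen_camera2(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

index.html would then typically contain two separate img tags, one with src pointing at /video_feed0 and one at /video_feed2.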

AWS Sagemaker batch transform with JSON input filter

I have a custom SageMaker instance for an NLP task and am trying to run a batch transform on the following JSON file:
{"id":123, "features":"This is a test message"}
and I'm looking to output the following:
{"id":123,"SageMakerOutput":"spam"}
Here's my batch transform code:
transformer = sklearn.transformer(instance_count=1,
                                  instance_type='local',
                                  accept='application/json',
                                  output_path="s3://spam-detection-messages-output/json_examples")
transformer.transform("s3://spam-detection-messages/json_examples", content_type='application/json',
                      input_filter="$.features", join_source="Input",
                      output_filter="$['features', SageMakerOutput']")
print('Waiting for transform job: ' + transformer.latest_transform_job.job_name)
transformer.wait()
According to this document,
https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html#batch-transform-data-processing-examples
I should be able to grab the "features" object using input_filter; however, it grabs the entire JSON payload and only outputs the prediction.
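For reference, a sketch of how those data-processing parameters are typically combined, based on my reading of that page; split_type and the quoting inside output_filter are my additions, not the original code:

# Sketch: feed only "features" to the model, join the prediction back onto the
# input record, and keep just "id" and "SageMakerOutput" in the output file.
transformer.transform(
    "s3://spam-detection-messages/json_examples",
    content_type='application/json',
    split_type='Line',                          # one JSON record per line
    input_filter='$.features',                  # what the container receives
    join_source='Input',                        # attach predictions to the input record
    output_filter="$['id','SageMakerOutput']",  # fields kept in the output
)
transformer.wait()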
I'm also including my training code:
import argparse
import pandas as pd
import os
import glob
import io
import json
from sklearn import tree
from sklearn.externals import joblib
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
import numpy as np
import nltk
nltk.download('punkt')
nltk.download('wordnet')
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
vectorizer = TfidfVectorizer()
def remove_stop_words(words):
    result = [i for i in words if i not in ENGLISH_STOP_WORDS]
    return result

def word_stemmer(words):
    return [stemmer.stem(o) for o in words]

def word_lemmatizer(words):
    return [lemmatizer.lemmatize(o) for o in words]

def remove_characters(words):
    return [word for word in words if len(word) > 1]

def clean_token_pipeline(words):
    cleaning_utils = [remove_stop_words, word_lemmatizer]
    for o in cleaning_utils:
        words = o(words)
    return words

def process_text(X_train, X_test, y_train, y_test):
    X_train = [word_tokenize(o) for o in X_train]
    X_test = [word_tokenize(o) for o in X_test]
    X_train = [clean_token_pipeline(o) for o in X_train]
    X_test = [clean_token_pipeline(o) for o in X_test]
    X_train = [" ".join(o) for o in X_train]
    X_test = [" ".join(o) for o in X_test]
    return X_train, X_test, y_train, y_test

def convert_to_feature(raw_tokenize_data):
    raw_sentences = [' '.join(o) for o in raw_tokenize_data]
    return vectorizer.transform(raw_sentences)

def _npy_loads(data):
    """
    Deserializes npy-formatted bytes into a numpy array
    """
    stream = io.BytesIO(data)
    return np.load(stream)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # Sagemaker specific arguments. Defaults are set in the environment variables.
    parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
    args = parser.parse_args()

    train_data = pd.read_csv(args.train + "/spamAssassin_min.csv", index_col=0)
    train_data.dropna(inplace=True)
    print(train_data.head())

    X_train, X_test, y_train, y_test = train_test_split(train_data['message'], train_data['label'], test_size=0.2, random_state=1)
    X_train, X_test, y_train, y_test = process_text(X_train, X_test, y_train, y_test)
    X_train = [o.split(" ") for o in X_train]
    X_test = [o.split(" ") for o in X_test]

    vectorizer = TfidfVectorizer()
    raw_sentences = [' '.join(o) for o in X_train]
    vectorizer.fit(raw_sentences)
    # print("saving transformer to {}".format(args.model_dir))
    joblib.dump(vectorizer, os.path.join(args.model_dir, "vectorizer.joblib"))

    x_train_features = convert_to_feature(X_train)
    x_test_features = convert_to_feature(X_test)

    clf = GaussianNB()
    clf.fit(x_train_features.toarray(), y_train)

    y_true, y_pred = y_test, clf.predict(x_test_features.toarray())
    print(classification_report(y_true, y_pred))

    joblib.dump(clf, os.path.join(args.model_dir, "model.joblib"))

def model_fn(model_dir):
    """Deserialized and return fitted model
    Note that this should have the same name as the serialized model in the main method
    """
    clf = joblib.load(os.path.join(model_dir, "model.joblib"))
    # print("model loaded {}".format(clf))
    return clf

def input_fn(request_body, request_content_type):
    print("** input_fn**")
    print("request_body:{} request_content_type:{}".format(request_body, request_content_type))
    if request_content_type == "text/plain":
        # convert to string
        message = str(request_body)
        return message
    elif request_content_type == "application/json":
        request_body_json = json.loads(request_body)
        # print("json {}".format(request_body_json))
        return request_body_json['features']
    elif request_content_type == "application/x-npy":
        return " ".join(_npy_loads(request_body))
    else:
        # Handle other content-types here or raise an Exception
        # if the content type is not supported.
        return request_body

def predict_fn(input_data, model):
    print("** predict_fn**")
    print("input_data: {} model:{}".format(input_data, model))
    print("\n")
    prefix = '/opt/ml/'
    model_path = os.path.join(prefix, 'model')
    my_vect = joblib.load(os.path.join(model_path, "vectorizer.joblib"))
    message = "".join(clean_token_pipeline(input_data))
    print("processed message: {}".format(message))
    message = my_vect.transform([message])
    message = message.toarray()
    prediction = model.predict(message)
    return prediction
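As a usage note (my reading of the linked data-processing page, not something stated in the question): the JSONPath filters are applied per record, so the transform input is normally JSON Lines, one object per line. A small sketch of building such a file; the local filename and the second record are invented for illustration:

import json

# Illustration only: the first record comes from the question, the second is made up.
records = [
    {"id": 123, "features": "This is a test message"},
    {"id": 456, "features": "Win a free prize now"},
]
with open("json_examples.jsonl", "w") as f:
    for record in records:
        f.write(json.dumps(record) + "\n")   # one JSON object per line

With split_type='Line' and join_source='Input', each output line should then contain the original record joined with the model's prediction.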

Serialize Gtk TreeStore / ListStore using JSON

I made a new example which shows much better what I am trying to do. The new example gives the following output. Is there a way that the data can go into the respective store key (the {} brackets)?
{
    "copy": [
        [
            [
                5.0,
                8.0,
                9.0
            ]
        ],
        [
            [
                4.0,
                0.0,
                1.0
            ]
        ]
    ],
    "name": "dataset1",
    "sets": [
        {
            "store": {},
            "type": "vector"
        },
        {
            "store": {},
            "type": "vector"
        }
    ]
}
New example
from gi.repository import Gtk
import json
import random

class Vector(object):
    def __init__(self, data):
        self.store = Gtk.ListStore(float, float, float)
        self.store.append([data[0], data[1], data[2]])
        self.type = "vector"

    def return_data(self):
        store_data = []

        def iterate_over_data(model, path, itr):
            row = model[path]
            store_data.append([row[0], row[1], row[2]])

        self.store.foreach(iterate_over_data)
        return store_data

class DataSet(object):
    def __init__(self, name):
        self.name = name
        self.sets = []

    def add_vector(self):
        data = [random.randint(0, 9) for x in range(3)]
        self.sets.append(Vector(data))

    def to_json(self):
        self.copy = []
        for s in self.sets:
            self.copy.append(s.return_data())
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

obj1 = DataSet("dataset1")
for x in range(2):
    obj1.add_vector()
print(obj1.to_json())
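One way to get the row data into each set's "store" key (a sketch built on the new example above, not necessarily the best pattern) is to give json.dumps a default function that turns Vector and DataSet objects into plain dictionaries:

# Sketch: serialize each Vector so its ListStore rows land under "store".
# Assumes the Vector/DataSet classes and obj1 from the new example above.
def to_serializable(obj):
    if isinstance(obj, Vector):
        return {"type": obj.type, "store": obj.return_data()}
    if isinstance(obj, DataSet):
        return {"name": obj.name, "sets": obj.sets}
    raise TypeError("Cannot serialize {}".format(type(obj).__name__))

print(json.dumps(obj1, default=to_serializable, sort_keys=True, indent=4))

With this, each entry in "sets" carries its row values under "store" instead of an empty {}.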
Old example
I am currently figuring out how to serialize a Gtk ListStore that is nested in a Gtk TreeStore. I got a small example to work, but am not sure if this approach will scale for programs that have more data attached (for example, the layer object could hold a color or a date of creation). Is there maybe another way to do this?
My current approach is to gather the data in list and dictionary form myself and then just create the JSON dump. I have the feeling that this would be rather difficult to maintain if I need to attach 25 values to each layer object.
from gi.repository import Gtk, Gdk
import json
import random

class LayerTreeView(Gtk.TreeView):
    def __init__(self, store):
        Gtk.TreeView.__init__(self, store)
        renderer = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn("Name", renderer, text=0)
        self.append_column(column)

class DataTreeView(Gtk.TreeView):
    def __init__(self, store):
        Gtk.TreeView.__init__(self, store)
        self.store = store
        renderer = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn("Data", renderer, text=0)
        self.append_column(column)

class MainWindow(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self, title="TreeView Serialize")
        self.connect("delete-event", Gtk.main_quit)
        self.set_border_width(10)
        self.set_default_size(400, 300)

        vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6, expand=True)
        self.add(vbox)

        self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)

        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
        button = Gtk.Button("Cut")
        button.connect("clicked", self.on_cut_clicked)
        hbox.pack_start(button, True, True, 0)
        button = Gtk.Button(stock=Gtk.STOCK_COPY)
        button.connect("clicked", self.on_copy_clicked)
        hbox.pack_start(button, True, True, 0)
        button = Gtk.Button(stock=Gtk.STOCK_PASTE)
        button.connect("clicked", self.on_paste_clicked)
        hbox.pack_start(button, True, True, 0)
        vbox.add(hbox)

        self.layer_store = Gtk.TreeStore(str, object, object)
        self.layer_view = LayerTreeView(self.layer_store)
        self.layer_sw = Gtk.ScrolledWindow()
        self.data_sw = Gtk.ScrolledWindow()
        self.layer_sw.add(self.layer_view)

        treebox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6, expand=True)
        treebox.pack_start(self.layer_sw, True, True, 0)
        treebox.pack_start(self.data_sw, True, True, 0)
        vbox.add(treebox)

        self.select = self.layer_view.get_selection()
        self.select.connect("changed", self.on_selection_changed)

        self.add_test_data()

    def add_test_data(self):
        for x in range(3):
            data_store = Gtk.ListStore(str)
            data_view = DataTreeView(data_store)
            for y in range(5):
                data_store.append([str(y+x)])
            self.layer_store.append(None, ["Data {}".format(x), data_store, data_view])

    def on_selection_changed(self, selection):
        """
        When layer is switched load respective data
        """
        model, treeiter = selection.get_selected()
        if treeiter != None:
            data_view = model[treeiter][2]
            child = self.data_sw.get_child()
            if child != None:
                self.data_sw.remove(self.data_sw.get_child())
            self.data_sw.add(data_view)
            self.show_all()

    def on_cut_clicked(self, button):
        pass

    def on_copy_clicked(self, button):
        copy_list = ["safe-to-paste"]
        data_dict = {}
        for row in self.layer_store:
            name = row[0]
            data_obj = row[1]
            value_list = []
            for datarow in data_obj:
                value = datarow[0]
                value_list.append(value)
            data_dict[name] = value_list
        copy_list.append(data_dict)
        data = json.dumps(copy_list)
        self.clipboard.set_text(data, -1)

    def on_paste_clicked(self, button):
        paste_str = self.clipboard.wait_for_text()
        try:
            parse = json.loads(paste_str)
            json_str = True
        except:
            json_str = False
        if json_str is False:
            return
        keyword = parse[0]
        if keyword != "safe-to-paste":
            return
        data_dict = parse[1]
        for x in data_dict:
            data_list = data_dict[x]
            data_store = Gtk.ListStore(str)
            data_view = DataTreeView(data_store)
            for y in data_list:
                data_store.append([str(y)])
            self.layer_store.append(None, [x, data_store, data_view])

win = MainWindow()
win.show_all()
Gtk.main()
I have an improved version of your code using a dict comprehension and @staticmethod that makes the signal callbacks more readable and shorter. Nevertheless, this does not really solve your problem, as it still generates the JSON manually. If the ListStore gets more complex, it would probably be better to let the DataListStore class generate its own JSON with a corresponding method (see the sketch after the code).
from gi.repository import Gtk, Gdk
import json

class LayerTreeView(Gtk.TreeView):
    def __init__(self, store):
        Gtk.TreeView.__init__(self, store)
        renderer = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn("Name", renderer, text=0)
        self.append_column(column)

class DataTreeView(Gtk.TreeView):
    def __init__(self):
        Gtk.TreeView.__init__(self)
        renderer = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn("Data", renderer, text=0)
        self.append_column(column)

class DataListStore(Gtk.ListStore):
    @staticmethod
    def from_json(*args, values=[]):
        store = DataListStore(*args)
        for value in values:
            store.append((value,))
        return store

class MainWindow(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self, title="TreeView Serialize")
        self.connect("delete-event", Gtk.main_quit)
        self.set_border_width(10)
        self.set_default_size(400, 300)

        vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6, expand=True)
        self.add(vbox)

        self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)

        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
        button = Gtk.Button("Cut")
        button.connect("clicked", self.on_cut_clicked)
        hbox.pack_start(button, True, True, 0)
        button = Gtk.Button(stock=Gtk.STOCK_COPY)
        button.connect("clicked", self.on_copy_clicked)
        hbox.pack_start(button, True, True, 0)
        button = Gtk.Button(stock=Gtk.STOCK_PASTE)
        button.connect("clicked", self.on_paste_clicked)
        hbox.pack_start(button, True, True, 0)
        vbox.add(hbox)

        self.layer_store = Gtk.TreeStore(str, object)
        self.layer_view = LayerTreeView(self.layer_store)
        self.data_view = DataTreeView()

        layer_sw = Gtk.ScrolledWindow()
        layer_sw.add(self.layer_view)
        data_sw = Gtk.ScrolledWindow()
        data_sw.add(self.data_view)

        treebox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6, expand=True)
        treebox.pack_start(layer_sw, True, True, 0)
        treebox.pack_start(data_sw, True, True, 0)
        vbox.add(treebox)

        select = self.layer_view.get_selection()
        select.connect("changed", self.on_selection_changed)

        self.add_test_data()

    def add_test_data(self):
        for x in range(3):
            data_list = [str(y+x) for y in range(5)]
            self.layer_store.append(None, ["Data {}".format(x), data_list])

    def on_selection_changed(self, selection):
        """
        When layer is switched load respective data
        """
        model, treeiter = selection.get_selected()
        if treeiter != None:
            self.data_view.set_model(
                DataListStore.from_json(str, values=model[treeiter][1])
            )

    def on_cut_clicked(self, button):
        pass

    def on_copy_clicked(self, button):
        copy_list = [
            'safe-to-paste',
            {row[0]: row[1] for row in self.layer_store},
        ]
        data = json.dumps(copy_list)
        self.clipboard.set_text(data, -1)

    def on_paste_clicked(self, button):
        paste_str = self.clipboard.wait_for_text()
        try:
            parse = json.loads(paste_str)
        except:
            return
        if parse[0] != "safe-to-paste":
            return
        data_dict = parse[1]
        for x in data_dict:
            self.layer_store.append(None, [x, data_dict[x]])

win = MainWindow()
win.show_all()
Gtk.main()
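Following that suggestion, a hedged sketch of what a serialization method on DataListStore could look like (to_value_list is a name I made up; it assumes the class from the code above):

# Sketch: let the store turn itself into plain Python data for json.dumps.
class DataListStore(Gtk.ListStore):
    @staticmethod
    def from_json(*args, values=[]):
        store = DataListStore(*args)
        for value in values:
            store.append((value,))
        return store

    def to_value_list(self):
        # Gtk.ListStore is iterable; collect the first column of every row.
        return [row[0] for row in self]

If the TreeStore column held DataListStore instances instead of plain lists, on_copy_clicked could then build its payload with {row[0]: row[1].to_value_list() for row in self.layer_store}.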

widget does not get proper size when added to a layout

I need to get the real width() of a widget when it is dynamically added to a layout, because I need to do some painting on the widget based on its width(). But the code below does not work as I expected: w.width() is always 640, which is obviously not the real width.
Any idea?
# -*- coding: utf-8 -*-
import os, sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import *
from PyQt4.QtCore import *

class MainWidget(QWidget):
    def __init__(self, parent=None):
        super(MainWidget, self).__init__(parent)
        self.setupUI()

    def setupUI(self):
        self.mainLayout = QVBoxLayout(self)

class MyWidget(QWidget):
    def __init__(self, parent=None):
        super(MyWidget, self).__init__(parent)
        layout = QVBoxLayout(self)
        layout.addWidget(QLabel('label'))

    def minimumSizeHint(self):
        return QSize(30, 30)

    def sizeHint(self):
        return QSize(100, 100)

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    main = MainWidget()
    main.show()

    l = QGridLayout()
    l.addWidget(MyWidget(), 0, 0)
    l.addWidget(MyWidget(), 0, 1)
    l.addWidget(MyWidget(), 1, 0)
    l.addWidget(MyWidget(), 1, 1)
    main.mainLayout.addLayout(l)

    w = l.itemAtPosition(0, 0).widget()
    print w.width(), w.height()

    sys.exit(app.exec_())
Just re-implement resizeEvent() and repaint whenever the size changes:
# -*- coding: utf-8 -*-
import os, sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import *
from PyQt4.QtCore import *

class MainWidget(QWidget):
    def __init__(self, parent=None):
        super(MainWidget, self).__init__(parent)
        self.setupUI()

    def setupUI(self):
        self.mainLayout = QVBoxLayout(self)

class MyWidget(QWidget):
    def __init__(self, parent=None):
        super(MyWidget, self).__init__(parent)
        layout = QVBoxLayout(self)
        layout.addWidget(QLabel('label'))

    def minimumSizeHint(self):
        return QSize(30, 30)

    def sizeHint(self):
        return QSize(100, 100)

    def resizeEvent(self, event):
        print self.width(), self.height()
        # call painting here

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    main = MainWidget()
    main.show()

    l = QGridLayout()
    l.addWidget(MyWidget(), 0, 0)
    l.addWidget(MyWidget(), 0, 1)
    l.addWidget(MyWidget(), 1, 0)
    l.addWidget(MyWidget(), 1, 1)
    main.mainLayout.addLayout(l)

    w = l.itemAtPosition(0, 0).widget()
    sys.exit(app.exec_())
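A side note on why the printed size is 640: width() is queried before the event loop has run, so the widget still has its default geometry; the value only becomes meaningful after the widget has been shown and laid out. If repainting from resizeEvent() is not convenient, one alternative (a sketch using the same PyQt4 names as above, appended to the __main__ block) is to defer the query with a zero-timeout single-shot timer:

# Sketch: query the geometry only after the event loop has processed the
# pending show/layout events.
from PyQt4.QtCore import QTimer

def report_size():
    w = l.itemAtPosition(0, 0).widget()
    print("real size: {}x{}".format(w.width(), w.height()))

QTimer.singleShot(0, report_size)   # fires once the event loop is running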