I'm working with a Key model backed by Postgres that is a generic table to hold API keys:
class Key(Model):
    __tablename__ = "keys"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id"))
    brokerage_id = Column(Integer, ForeignKey("brokerages.id"))
    account_id = Column(Integer, ForeignKey("accounts.id"))
    key = Column(String(128))
    value = Column(String(128))
In the below example, user 2 has three keys. All three are associated with brokerage 2 and account 2. This is represented by IDs 4 through 6. For this site, the user has an authentication token plus two query IDs.
id user_id brokerage_id account_id key value
--------------------------------------------------------------------
4 2 2 2 token 999999999999
5 2 2 2 query_id 888888
6 2 2 2 query_id 777777
7 1 2 3 token 444444444444
I am trying to construct a query so that my result will be modeled as such:
[(user_id, brokerage_id, account_id, token, [query_id_1, query_id_2, ...]), ...]
So for the above example, it would look like this:
[(2, 2, 2, 999999999999, [888888, 777777]), (1, 2, 3, 444444444444, [])]
I've got the following queries which select the token and the query_ids
tokens = db.session.query(
Key.user_id, Key.brokerage_id, Key.account_id, Key.value
).filter(Key.key=='token').all()
query_ids = db.session.query(
Key.user_id, Key.brokerage_id, Key.account_id, Key.value
).filter(Key.key=='query_id').all()
I've tried using subqueries in various ways but cannot quite get the output to resemble what I need. How can I construct a query that returns results matching my list of tuples above?
Result
Adding the final working query here, thanks to @rfkortekaas:
from sqlalchemy.orm import aliased
from sqlalchemy import func, and_
from project.models import Key
from project.extensions import db
key_token = aliased(Key)
q = db.session.query(
    key_token.user_id,
    key_token.brokerage_id,
    key_token.account_id,
    key_token.value.label('token'),
    func.array_agg(Key.value).label('query_ids')
).join(
    Key,
    and_(
        key_token.user_id == Key.user_id,
        key_token.brokerage_id == Key.brokerage_id,
        key_token.account_id == Key.account_id,
        Key.key == 'query_id'
    )
).filter(
    key_token.key == 'token'
).group_by(
    key_token.user_id,
    key_token.brokerage_id,
    key_token.account_id,
    key_token.value
)
results = q.all()
You can use the array_agg function from PostgreSQL to create an array of the results:
from sqlalchemy import select, func, and_
from sqlalchemy.orm import aliased

key_token = aliased(Key)

stmt = select(key_token.user_id,
              key_token.brokerage_id,
              key_token.account_id,
              key_token.value.label('token'),
              func.array_agg(Key.value).label('query_ids'))\
    .join(Key,
          and_(key_token.user_id == Key.user_id,
               key_token.brokerage_id == Key.brokerage_id,
               key_token.account_id == Key.account_id,
               Key.key == 'query_id'))\
    .where(key_token.key == 'token')\
    .group_by(key_token.user_id,
              key_token.brokerage_id,
              key_token.account_id,
              key_token.value)
keys = session.execute(stmt).all()
for row in keys:
print(row)
Results in:
user_id  brokerage_id  account_id  token  query_ids
1        2             3           '44'   ['4']
2        2             1           '33'   ['6']
2        2             2           '99'   ['8', '7']
For the following dataset:
user_id  brokerage_id  account_id  key       value
2        2             2           token     '99'
2        2             1           token     '33'
2        2             1           query_id  '6'
2        2             2           query_id  '8'
2        2             2           query_id  '7'
1        2             3           token     '44'
1        2             3           query_id  '4'
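If you need the exact list-of-tuples shape from the question rather than Row objects, a small reshaping sketch over the labelled columns (token and query_ids are the labels used in the query above, and keys is the result of session.execute(stmt).all()):

# Reshape each Row into (user_id, brokerage_id, account_id, token, [query_ids...]).
result = [
    (row.user_id, row.brokerage_id, row.account_id, row.token, row.query_ids)
    for row in keys
]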
Related
I want to parse a column and get the key-value pairs as columns
Input:
I have a dataframe (called df) with the following structure:
ID data
A1 {"userMatch": "{"match":{"phone":{"name":{"score":1}},"name":{"score":1}}}"}
A2 {"userMatch": "{"match":{"phone":{"name":{"score":0.934}},"name":{"score":0.952}}}"}
Expected Output:
I want to create new 'score' columns and get the values from the key-value pairs:
ID score1 score2
A1 1 1
A2 0.934 0.952
Attempted Solution:
data_json = df['data'].transform(lambda x: json.loads(x))
df['score1'] = data_json.str.get('userMatch').str.get('match').str.get('phone').str.get('name').str.get('score')
df['score2'] = data_json.str.get('userMatch').str.get('match').str.get('phone').str.get('name').str.get('name').str.get('score')
Error:
TypeError: the JSON object must be str, bytes or bytearray, not Series
Notes:
I am not even sure how to get the next score2
Using my previous thought regarding using regex, this is how I would approach your problem:
import re

def getOffset(row, offset):
    vals = re.findall(r"[-+]?\d*\.\d+|\d+", row.data['userMatch'])
    if len(vals) > offset:
        return vals[offset]
    return None

df['score1'] = df.apply(lambda row: getOffset(row, 0), axis=1)
df['score2'] = df.apply(lambda row: getOffset(row, 1), axis=1)
df.drop(['data'], axis=1, inplace=True)
This yields a dataframe of the form:
ID score1 score2
0 A1 1 1
1 A2 0.934 0.952
This isn't pretty, but it works with split(). I couldn't get the data to be read as a dictionary; I kept getting invalid-syntax or missing-delimiter errors.
import io
import pandas as pd

df = pd.read_csv(io.StringIO('''ID data
A1 {"userMatch": "{"match":{"phone":{"name":{"score":1}},"name":{"score":1}}}"}
A2 {"userMatch": "{"match":{"phone":{"name":{"score":0.934}},"name":{"score":0.952}}}"}'''), sep=' ', engine='python')

df['score1'] = df['data'].apply(lambda x: x.split('{"userMatch": "{"match":{"phone":{"name":{"score":')[1].split('}', 1)[0])
df['score2'] = df['data'].apply(lambda x: x.split('{"userMatch": "{"match":{"phone":{"name":{"score":')[1].split(',"name":{"score":')[1].split('}', 1)[0])
Output:
ID data score1 score2
0 A1 {"userMatch": "{"match":{"phone":{"name":{"score":1}},"name":{"score":1}}}"} 1 1
1 A2 {"userMatch": "{"match":{"phone":{"name":{"score":0.934}},"name":{"score":0.952}}}"} 0.934 0.952
We have an inventory feature where we generate Bills. There is an Edit Bill API call, which we have implemented as a PATCH call.
A Bill with id = 1 has 2 LineItems:
| Stock Id | Qty | Rate |
| 10 | 2 | 10 |
| 11 | 3 | 20 |
Now let's say I want to change the quantity for stock ID 10 to 5, and the rate for stock ID 11 to 40.
We have represented it as a PATCH call:
bill: {
    id: 1,
    lineItems: [
        {
            stockId: 10,
            qty: 5
        },
        {
            stockId: 11,
            rate: 40
        }
    ]
}
In the backend we run the following query:
UPDATE `billlineitem`
SET `rate` = ( CASE
WHEN stockid = 11 THEN '40'
ELSE rate
END ),
`qty` = ( CASE
WHEN stockid = 10 THEN 5
ELSE qty
END ),
`updated_billitemquantity_at` = '2019-09-06 05:16:06.219'
WHERE `bill_id` = '1'
AND `stockid` IN ( 10, 11 )
Is it OK that, when there is no change for an attribute, the ELSE clause takes that attribute's current value from the database? The above update statement is run in a transaction.
Is this a correct approach? Will this do an update of every attribute for every stock ID? Is there a better approach?
We are using MySQL DB.
What you've written should work, but it will get very complex if you have to update different columns for many different stock IDs. It would probably be simpler, and might perform better, to do a separate query for each ID.
START TRANSACTION;

UPDATE billlineitem
SET qty = 5, `updated_billitemquantity_at` = '2019-09-06 05:16:06.219'
WHERE `bill_id` = '1' AND stockid = 10;

UPDATE billlineitem
SET rate = '40', `updated_billitemquantity_at` = '2019-09-06 05:16:06.219'
WHERE `bill_id` = '1' AND stockid = 11;

COMMIT;
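As a rough sketch of how the backend could generate one parameterized statement per line item from the PATCH payload (Python with a DB-API style cursor is assumed here; apply_line_item_patch and ALLOWED_COLUMNS are illustrative names, not part of your code):

# Hypothetical helper: issue one parameterized UPDATE per patched line item.
ALLOWED_COLUMNS = {"qty", "rate"}  # whitelist of patchable columns

def apply_line_item_patch(cursor, bill_id, line_items, updated_at):
    for item in line_items:
        changes = {col: val for col, val in item.items() if col in ALLOWED_COLUMNS}
        if not changes:
            continue  # nothing to update for this line item
        changes["updated_billitemquantity_at"] = updated_at
        set_clause = ", ".join(f"`{col}` = %s" for col in changes)
        params = list(changes.values()) + [bill_id, item["stockId"]]
        cursor.execute(
            f"UPDATE `billlineitem` SET {set_clause} "
            "WHERE `bill_id` = %s AND `stockid` = %s",
            params,
        )

Only the column names come from the whitelist; all values go through placeholders, so each line item gets exactly one small, targeted UPDATE.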
I'm new to using databases and making Django queries to get information.
If I have a table with id as the primary key, and ages and height as other columns, what query would bring me back a dictionary of all the ids and the related ages?
For instance if my table looks like below:
special_id | ages | heights
1 | 5 | x1
2 | 10 | x2
3 | 15 | x3
I'd like to have a key-value pair like {special_id: ages} where special_id is also the primary key.
Is this possible?
Try this:
from django.http import JsonResponse
def get_json(request):
    result = MyModel.objects.all().values('id', 'ages')  # or simply .values() to get all fields
    result_list = list(result)  # important: convert the QuerySet to a list object
    return JsonResponse(result_list, safe=False)
You will get the classic:
{field_name: field_value}
And if you want {field_value: field_value} (i.e. {special_id: ages}), you can do:
from django.http import JsonResponse
def get_json(request):
    result = MyModel.objects.all()
    a = {}
    for item in result:
        a[item.id] = item.ages
    return JsonResponse(a)
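If you only need the {special_id: ages} mapping itself rather than a JSON response, a shorter sketch using values_list (assuming the fields are named id and ages):

# values_list('id', 'ages') yields (id, ages) tuples, which dict() turns into {id: ages}.
id_to_age = dict(MyModel.objects.values_list('id', 'ages'))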
Here is my table's (events) content. eventID is the primary key and parentID is a foreign key referencing events(eventID).
Self-referencing table:
eventID eventName parentID appFK
1 evt1 null 2
2 evt2 1 1
3 evt3 1 1
4 evt4 null 3
5 evt5 8 3
6 evt6 null 4
7 evt7 null 1
8 evt8 null 1
and another table content (applications) like this :
appID appName
1 app1
2 app2
3 app3
4 app4
I'd like to fetch all eventIDs, parents or not, for a given appID. If a child has the given appID, I'd like to get its parentID and not the child itself. So the result should look like this with appID = 1:
eventID eventName ParentID appFK
1 evt1 null 2 // parent event who has children with appID = 1
7 evt7 null 1 // event with no child and appID = 1
8 evt8 null 1 // parent event with appID = 1 and has a child
I tried a lot of examples and read a lot of solutions here, but I didn't find a problem like this. Can you help me write the right SQL? Thanks.
Try this:
SELECT DISTINCT COALESCE(e2.eventID, e1.eventID),
COALESCE(e2.eventName, e1.eventName),
COALESCE(e2.appFK, e1.appFK)
FROM events AS e1
LEFT JOIN events AS e2 ON e1.parentID = e2.eventID AND e1.appFK = 1
WHERE (e1.appFK = 1 AND e1.parentID IS NULL) OR (e2.eventID IS NOT NULL)
The LEFT JOIN fetches parent records (e1.parentID = e2.eventID) of a child having appID = 1 (e1.appFK = 1).
The WHERE clause selects root records having appID = 1 and root records that are related to a child having appID = 1 (e2.eventID IS NOT NULL).
My project (Flask + SQLAlchemy) is deployed with uWSGI (4 workers) and MySQL (with InnoDB).
These are my Models:
class Cards(db.Model):
    id      = db.Column(db.Integer, primary_key=True)
    no      = db.Column(db.String(11), index=True, unique=True, nullable=False)
    balance = db.Column(db.Numeric(12, 2), nullable=False, default=0)

class trans_details(db.Model):
    id                = db.Column(db.Integer, primary_key=True)
    from_card_id      = db.Column(db.Integer, db.ForeignKey('cards.id'), nullable=False)
    to_card_id        = db.Column(db.Integer, db.ForeignKey('cards.id'), nullable=False)
    amount            = db.Column(db.Numeric(12, 2), nullable=False)
    from_card_balance = db.Column(db.Numeric(12, 2), nullable=False)
    to_card_balance   = db.Column(db.Numeric(12, 2), nullable=False)
    timestamp         = db.Column(db.Numeric(17, 7), default=time.time, nullable=False)  # timestamp
    from_card = db.relationship('Cards', foreign_keys=[from_card_id],
                                backref=db.backref('out_details', lazy='dynamic'))
    to_card   = db.relationship('Cards', foreign_keys=[to_card_id],
                                backref=db.backref('in_details', lazy='dynamic'))
And my code is something like this:
from contextlib import contextmanager

@contextmanager
def trans():
    try:
        yield
        db.session.commit()
    except:
        db.session.rollback()

def transfer(from_card, to_card, amount):
    with trans():
        from_card.balance = Cards.balance - amount
        to_card.balance = Cards.balance + amount
        db.session.add(from_card)
        db.session.add(to_card)
        db.session.flush()
        if from_card.balance < 0:
            raise Exception('xxx')
        details = trans_details(from_card=from_card,
                                to_card=to_card,
                                amount=amount,
                                from_card_balance=from_card.balance,
                                to_card_balance=to_card.balance)
        db.session.add(details)
        db.session.flush()

def batch_transfer(rule):
    with trans():
        # parse the rule and call the transfer function repeatedly
        ...
But sometimes I do not get the expected results, e.g.:

select * from trans_details order by `timestamp` desc;

from_card | to_card | amount | from_card_balance     | to_card_balance | timestamp
A         | C       | 100    | 1000 (should be 950)  | xxx             | 1413257244.5339300 (2014-10-14 11:27:24)
A         | B       | 50     | 1050                  | 150             | 1413257244.4818400 (2014-10-14 11:27:24)
B         | A       | 100    | 100                   | 1100            | xxx
How can I fix it?
I fixed it. It was my problem; my code was as follows:
from_card = Cards.query.with_for_update().filter(xxx)
to_card = Cards.query.with_for_update().filter(xxx)
# A function was called here, but that function contained a commit operation,
# so the row locks taken by with_for_update() were released too early.
transfer(from_card, to_card, amount)
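For completeness, a minimal sketch of the corrected flow, assuming the rows are looked up by card number; locked_transfer and parse_rule are illustrative names, not the OP's exact code. The key points are that with_for_update() acquires the row locks and that nothing commits until the whole batch is done, so the locks are held for the full transaction:

def locked_transfer(from_no, to_no, amount):
    # SELECT ... FOR UPDATE: both rows stay locked until the surrounding commit/rollback.
    from_card = Cards.query.with_for_update().filter(Cards.no == from_no).one()
    to_card = Cards.query.with_for_update().filter(Cards.no == to_no).one()
    from_card.balance = from_card.balance - amount
    to_card.balance = to_card.balance + amount
    db.session.flush()  # make the new balances visible for the check below
    if from_card.balance < 0:
        raise Exception('insufficient balance')
    db.session.add(trans_details(from_card=from_card,
                                 to_card=to_card,
                                 amount=amount,
                                 from_card_balance=from_card.balance,
                                 to_card_balance=to_card.balance))

def batch_transfer(rule):
    with trans():  # single commit (or rollback) for the whole batch
        for from_no, to_no, amount in parse_rule(rule):  # parse_rule is hypothetical
            locked_transfer(from_no, to_no, amount)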