Failing to decrypt blob passwords only once in a while using Amazon KMS - boto

import os

AWS_DIRECTORY = '/home/jenkins/.aws'
certificates_folder = 'my_folder/'  # trailing slash so the cp paths below resolve
SUCCESS = 'success'
raise_on_copy_error = True

class FailedCopyError(Exception):
    pass

FAILED_COPY = FailedCopyError('failed to copy AWS config/credentials files')

class AmazonKMS(object):
    def __init__(self):
        # making sure boto3 has the certificates and region files
        result = os.system('mkdir -p ' + AWS_DIRECTORY)
        self._check_os_result(result)
        result = os.system('cp ' + certificates_folder + 'kms_config ' + AWS_DIRECTORY + '/config')
        self._check_os_result(result)
        result = os.system('cp ' + certificates_folder + 'kms_credentials ' + AWS_DIRECTORY + '/credentials')
        self._check_os_result(result)

        # boto3 is the Amazon client package
        import boto3
        self.kms_client = boto3.client('kms', region_name='us-east-1')
        self.global_key_alias = 'alias/global'
        self.global_key_id = None

    def _check_os_result(self, result):
        # os.system returns a non-zero exit status on failure
        if result != 0 and raise_on_copy_error:
            raise FAILED_COPY

    def decrypt_text(self, encrypted_text):
        response = self.kms_client.decrypt(
            CiphertextBlob=encrypted_text
        )
        return response['Plaintext']
When using it:
amazon_kms = AmazonKMS()
amazon_kms.decrypt_text(blob_password)
I'm getting:
E ClientError: An error occurred (AccessDeniedException) when calling the Decrypt operation: The ciphertext refers to a customer master key that does not exist, does not exist in this region, or you are not allowed to access.
The stack trace is:
../keys_management/amazon_kms.py:77: in decrypt_text
CiphertextBlob = encrypted_text
/home/jenkins/.virtualenvs/global_tests/local/lib/python2.7/site-packages/botocore/client.py:253: in _api_call
return self._make_api_call(operation_name, kwargs)
/home/jenkins/.virtualenvs/global_tests/local/lib/python2.7/site-packages/botocore/client.py:557: in _make_api_call
raise error_class(parsed_response, operation_name)
This happens in a script that runs once an hour, and it fails only 2-3 times a day; after a retry it succeeds.
I also tried upgrading from boto3 1.2.3 to 1.4.4.
What is the possible cause of this behavior?

My guess is that the issue is not in anything you described here.
Most likely the login tokens time out, or something along those lines. To investigate this further, a closer look at the way the login works here would probably help.
How does this code run? Is it running inside AWS, e.g. on Lambda or EC2? Do you run it from your own server (it looks like it runs on Jenkins)? How is the login access established? What are those kms_credentials used for, and what do they look like? Do you do something like assuming a role (which would probably work through access tokens that stop working after some time)?
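If it does turn out to be expiring credentials, a stopgap while you investigate is to retry with a short backoff and to log which credential source botocore actually resolved. A minimal sketch (the retry wrapper and its names are mine, not from your code):

import time
import boto3
from botocore.exceptions import ClientError

def decrypt_with_retry(kms_client, encrypted_text, max_retries=3):
    # Retry only the intermittent AccessDeniedException; re-raise anything else
    for attempt in range(1, max_retries + 1):
        try:
            return kms_client.decrypt(CiphertextBlob=encrypted_text)['Plaintext']
        except ClientError as e:
            if e.response['Error']['Code'] != 'AccessDeniedException' or attempt == max_retries:
                raise
            time.sleep(2 ** attempt)  # simple exponential backoff

# Log where botocore found its credentials (shared file, env vars, instance role, ...)
print(boto3.Session().get_credentials().method)

If the printed method is something role- or token-based rather than 'shared-credentials-file', that would support the token-expiry theory.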

Related

Angr for a HTB challenge, no solution found

I'm new to RE. I'm trying to solve a HackTheBox challenge called RAuth with angr. I understand how to analyze and solve this challenge differently, without angr, but I really want to understand what is wrong with my angr script, or maybe why angr is not feasible for this (and similar) cases?
The application is simple: after it starts, it requests a password from stdin and exits if the password is incorrect:
>:~/challenges/rauth$ ./rauth
Welcome to secure login portal!
Enter the password to access the system:
wqeqwwqewqewqeqwewqeqweqweqweqweqwewqeqwe
You entered a wrong password!
When I run my script it works for around 15 minutes and dies with one of two errors:
"Killed"
"IndexError: tuple index out of range"
My angr script:
#!/usr/bin/env python
# coding: utf-8
import sys

import angr
import claripy

def is_successful(st):
    output = st.posix.dumps(sys.stdout.fileno())
    return b'Successfully Authenticated' in output

def should_abort(state):
    output = state.posix.dumps(sys.stdout.fileno())
    return b'You entered a wrong password!' in output

def main(round):
    print("Checking " + str(round))
    p = angr.Project('rauth', auto_load_libs=False)
    flag_chars = [claripy.BVS('flag_%d' % i, 8) for i in range(round)]
    flag = claripy.Concat(*flag_chars + [claripy.BVV(b'\n')])
    st = p.factory.full_init_state(
        args=['./rauth'],
        add_options=angr.options.unicorn,
        stdin=angr.SimPackets(name='stdin', content=[(flag, 32)])
        # stdin=flag,
    )
    st.options.add(angr.options.SYMBOL_FILL_UNCONSTRAINED_MEMORY)
    st.options.add(angr.options.SYMBOL_FILL_UNCONSTRAINED_REGISTERS)
    for k in flag_chars:
        st.solver.add(k >= ord(" "))
        st.solver.add(k <= ord("~"))
    sm = p.factory.simulation_manager(st)
    # sm.explore(find=is_successful, avoid=should_abort)
    sm.explore(find=lambda s: b"Successfully" in s.posix.dumps(1),
               avoid=lambda s: b"wrong" in s.posix.dumps(1))
    if len(sm.found) > 0:
        for solution_state in sm.found:  # was `ex.found`, a NameError
            print("[>>] {!r}".format(solution_state.solver.eval(flag, cast_to=bytes)))
    else:
        print("[>>] no solution found :(")

if __name__ == "__main__":
    print(main(32))
Am I using angr in a case where it's not applicable? What am I missing?
I also tried playing with the options, like removing unicorn, enabling auto_load_libs, using or disabling the lambdas, etc. No success.

Updating one MySQL table with multiple processes via pymysql

Actually, I am trying to update one table with multiple processes via pymysql, where each process reads a CSV file split from a huge one, in order to speed things up. But I get a Lock wait timeout exceeded; try restarting transaction exception when I run the script. After searching posts on this site, I found one that mentioned using the built-in LOAD DATA INFILE, but with no details on it. How can I do this with pymysql to reach my aim?
---------------------------first edit----------------------------------------
Here's the job method:
import codecs
import csv
import time

import pymysql

def importprogram(path, name):
    begin = time.time()
    print('begin to import program' + name + ' info.')
    # e.g. path = "c:\\sometest.csv"
    file = open(path, mode='rb')
    csvfile = csv.reader(codecs.iterdecode(file, 'utf-8'))
    connection = None
    try:
        connection = pymysql.connect(host='a host', user='someuser',
                                     password='somepsd', db='mydb',
                                     cursorclass=pymysql.cursors.DictCursor)
        count = 1
        with connection.cursor() as cursor:
            sql = '''update sometable set Acolumn='{guid}' where someid='{pid}';'''
            next(csvfile, None)  # skip the header row
            for line in csvfile:
                try:
                    count = count + 1
                    if ''.join(line).strip():
                        command = sql.format(guid=line[2], pid=line[1])
                        cursor.execute(command)
                    if count % 1000 == 0:
                        print('program' + name + ' cursor execute', count)
                except csv.Error:
                    print('program csv.Error:', count)
                    continue
                except IndexError:
                    print('program IndexError:', count)
                    continue
                except StopIteration:
                    break
    except Exception as e:
        print('program' + name, str(e))
    finally:
        if connection is not None:
            connection.commit()
            connection.close()
        file.close()
        print('program' + name + ' info done.time cost:', time.time() - begin)
And the multi-processing method:
import multiprocessing as mp

def multiproccess():
    pool = mp.Pool(3)
    results = []
    paths = ['C:\\testfile01.csv', 'C:\\testfile02.csv', 'C:\\testfile03.csv']
    name = 1
    for path in paths:
        results.append(pool.apply_async(importprogram, args=(path, str(name))))
        name = name + 1
    print([result.get() for result in results])  # .get() also re-raises worker errors
    pool.close()
    pool.join()
And the main method:
if __name__ == '__main__':
    multiproccess()
I am new to Python. Is there something wrong with my code, or with the approach itself? Should I use just a single process to read and import the data?
Your issue is that you are exceeding the time allowed for a response to be fetched from the server, so the client is automatically timing out.
In my experience, adjust the wait timeout to something like 6000 seconds, combine the files into one CSV, and just leave the data to import. Also, I would recommend running the query directly from MySQL rather than from Python.
The way I usually import CSV data from Python into MySQL is through the INSERT ... VALUES ... method, and I only do so when some kind of manipulation of the data is required (i.e. inserting different rows into different tables).
I like your approach and understand your thinking, but in reality there is no need. The benefit of the INSERT ... VALUES ... method is that you won't run into any timeout issues.
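For illustration, a minimal sketch of that INSERT ... VALUES ... approach with pymysql (Python 3; the table, columns, and connection details are placeholders mirroring the question, and the ON DUPLICATE KEY UPDATE clause stands in for the row-by-row UPDATE, assuming someid is a unique key):

import csv
import pymysql

def import_csv(path):
    connection = pymysql.connect(host='a host', user='someuser',
                                 password='somepsd', db='mydb')
    try:
        with open(path, newline='', encoding='utf-8') as f, connection.cursor() as cursor:
            reader = csv.reader(f)
            next(reader, None)  # skip the header row
            rows = [(line[2], line[1]) for line in reader if ''.join(line).strip()]
            # One round trip per batch; parameterized, so pymysql escapes the values
            cursor.executemany(
                'INSERT INTO sometable (Acolumn, someid) VALUES (%s, %s) '
                'ON DUPLICATE KEY UPDATE Acolumn = VALUES(Acolumn)',
                rows)
        connection.commit()
    finally:
        connection.close()

Because all values travel in a handful of statements instead of one UPDATE per row, each transaction holds its locks far more briefly, which is what makes the lock-wait timeout go away.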

Get only part of a JSON when using an API on NodeMCU

I am using http.get() to fetch a JSON document from an API, but it's not getting the data. I suspect that this JSON is too big for the NodeMCU. I only need the information in the subpart "stats:". Is it possible to http.get() only that part of the JSON?
EDIT:
This is my code
function getstats()
    http.get("https://api.aeon-pool.com/v1/stats_address?address=WmsGUrXTR7sgKmHEqRNLgPLndWKSvjFXcd4soHnaxVjY3aBWW4kncTrRcBJJgUkeGwcHfzuZABk6XK6qAp8VmSci2AyGHcUit", nil, function(code, pool)
        if (code < 0) then
            print("can't get stats")
        else
            h = cjson.decode(pool)
            hashrate = h[1]["hashrate"]
            print(hashrate)
            dofile('update_display.lua')
        end
    end)
end
I also have another function, defined above getstats(), that gets data from another API:
function getaeonrate()
    http.get("https://api.coinmarketcap.com/v1/ticker/aeon/?convert=EUR", nil, function(code, dataaeon)
        if (code < 0) then
            print("can't get aeon")
        else
            -- Decode JSON data
            m = cjson.decode(dataaeon)
            -- Extract AEON/EUR price from decoded JSON
            aeonrate = string.format("%f", m[1]["price_eur"]);
            aeonchange = "24h " .. m[1]["percent_change_24h"] .. "% 7d " .. m[1]["percent_change_7d"] .. "%"
            dofile('update_display.lua')
        end
    end)
end
But now the weird thing is, when I access 'pool' inside getstats() I get the JSON data from getaeonrate(). So "hashrate" isn't even in the JSON, because I am getting the JSON from the other function.
I tried making a new project with only getstats(), and that doesn't work at all; I always get errors like this:
HTTP client: Disconnected with error: -9
HTTP client: Connection timeout
HTTP client: Connection timeout
Yesterday I thought that the response from api.aeon-pool.com was too big. If you look at the JSON in your web browser, you can see that the top entry is 'stats:', and I only need that, none of the other stuff. So if the response is too big, it would be nice to http.get() only that part of the JSON, hence my original question. At the moment I am not even sure what is not working correctly. I read that the NodeMCU firmware generally had problems with http.get() and that it didn't work correctly for a long time, but getting data from api.coinmarketcap.com works fine in the original project.
The problems with the HTTP module are almost certainly related to https://github.com/nodemcu/nodemcu-firmware/issues/1707 (SSL and HTTP are problematic).
Therefore, I tried the more bare-bones TLS module on the current master branch. This means you need to manually parse the HTTP response, including all the headers, looking for the JSON content. Besides, you seem to be on an older NodeMCU version, as you're still using CJSON - I used SJSON below:
Current NodeMCU master branch
function getstats()
    buffer = nil
    counter = 0
    local srv = tls.createConnection()
    srv:on("receive", function(sck, payload)
        print("[stats] received data, " .. string.len(payload))
        if buffer == nil then
            buffer = payload
        else
            buffer = buffer .. payload
        end
        counter = counter + 1
        -- not getting HTTP content-length header back -> poor man's check for a complete response
        if counter == 2 then
            print("[stats] done, processing payload")
            local beginJsonString = buffer:find("{")
            local jsonString = buffer:sub(beginJsonString)
            local hashrate = sjson.decode(jsonString)["stats"]["hashrate"]
            print("[stats] hashrate from aeon-pool.com: " .. hashrate)
        end
    end)
    srv:on("connection", function(sck, c)
        sck:send("GET /v1/stats_address?address=WmsGUrXTR7sgKmHEqRNLgPLndWKSvjFXcd4soHnaxVjY3aBWW4kncTrRcBJJgUkeGwcHfzuZABk6XK6qAp8VmSci2AyGHcUit HTTP/1.1\r\nHost: api.aeon-pool.com\r\nConnection: close\r\nAccept: */*\r\n\r\n")
    end)
    srv:connect(443, "api.aeon-pool.com")
end
Note that the receive event is fired for every network frame: https://nodemcu.readthedocs.io/en/latest/en/modules/net/#netsocketon
NodeMCU fails to establish a connection to api.coinmarketcap.com due to a TLS handshake failure; I'm not sure why that is. Otherwise, your getaeonrate() could be implemented likewise.
Frozen 1.5.4 branch
With the old branch the net module can connect to coinmarketcap.com.
function getaeonrate()
    local srv = net.createConnection(net.TCP, 1)
    srv:on("receive", function(sck, payload)
        print("[aeon rate] received data, " .. string.len(payload))
        local beginJsonString = payload:find("%[")
        local jsonString = payload:sub(beginJsonString)
        local json = cjson.decode(jsonString)
        local aeonrate = string.format("%f", json[1]["price_eur"]);
        local aeonchange = "24h " .. json[1]["percent_change_24h"] .. "% 7d " .. json[1]["percent_change_7d"] .. "%"
        print("[aeon rate] aeonrate from coinmarketcap.com: " .. aeonrate)
        print("[aeon rate] aeonchange from coinmarketcap.com: " .. aeonchange)
    end)
    srv:on("connection", function(sck, c)
        sck:send("GET /v1/ticker/aeon/?convert=EUR HTTP/1.1\r\nHost: api.coinmarketcap.com\r\nConnection: close\r\nAccept: */*\r\n\r\n")
    end)
    srv:connect(443, "api.coinmarketcap.com")
end
Conclusion
The HTTP module and TLS seem a no-go for your APIs due to a bug in the firmware (1707).
The net/TLS module of the current master branch manages to connect to api.aeon-pool.com but not to api.coinmarketcap.com.
With the old and frozen 1.5.4 branch it's exactly the other way around.
There may (also) be issues with cipher suites that don't match between the firmware and the API provider(s).
-> :( no fun like that

How to store MQTT Mosquitto publish events into MySQL? [duplicate]

This question already has an answer here:
Is there a way to store Mosquitto payload into an MySQL database for history purpose? (1 answer)
Closed 4 years ago.
I've connected a device that communicates with my Mosquitto MQTT server (an RPi) and publishes to a specified topic. What I want to do now is store the messages published on that topic into a MySQL database. I know how MySQL works, but I don't know how to listen for these incoming publications. I'm looking for a lightweight solution that runs in the background. Any pointers or ideas on libraries to use are very welcome.
I've done something similar in the last few days:
live-collecting weather-station data with pywws
publishing with pywws.service.mqtt to an MQTT broker
a Python script on a NAS collecting the data and writing it to MariaDB
#!/usr/bin/python -u
import ssl

import mysql.connector as mariadb
import paho.mqtt.client as mqtt

mariadb_connection = mariadb.connect(user='USER', password='PW', database='MYDB')
cursor = mariadb_connection.cursor()

# MQTT Settings
MQTT_Broker = "192.XXX.XXX.XXX"
MQTT_Port = 8883
Keep_Alive_Interval = 60
MQTT_Topic = "/weather/pywws/#"

# Subscribe
def on_connect(client, userdata, flags, rc):
    mqttc.subscribe(MQTT_Topic, 0)

def on_message(mosq, obj, msg):
    # Prepare data: separate columns and values
    msg_clear = msg.payload.translate(None, '{}""').split(", ")
    msg_dict = {}
    for i in range(0, len(msg_clear)):
        msg_dict[msg_clear[i].split(": ")[0]] = msg_clear[i].split(": ")[1]
    # Prepare dynamic SQL statement
    placeholders = ', '.join(['%s'] * len(msg_dict))
    columns = ', '.join(msg_dict.keys())
    sql = "INSERT INTO pws ( %s ) VALUES ( %s )" % (columns, placeholders)
    # Save data into DB table
    try:
        cursor.execute(sql, msg_dict.values())
    except mariadb.Error as error:
        print("Error: {}".format(error))
    mariadb_connection.commit()

def on_subscribe(mosq, obj, mid, granted_qos):
    pass

mqttc = mqtt.Client()

# Assign event callbacks
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_subscribe = on_subscribe

# Connect
mqttc.tls_set(ca_certs="ca.crt", tls_version=ssl.PROTOCOL_TLSv1_2)
mqttc.connect(MQTT_Broker, int(MQTT_Port), int(Keep_Alive_Interval))

# Continue the network loop & close db-connection
mqttc.loop_forever()
mariadb_connection.close()
If you are familiar with Python, the Paho MQTT library is simple, light on resources, and interfaces well with Mosquitto. To use it, simply subscribe to the topic and set up a callback to pass the payload to MySQL using peewee, as shown in this answer. Run the script in the background and call it good!
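A minimal sketch of that combination, assuming a plain (non-TLS) broker on localhost and a made-up table and topic; adjust the model fields to whatever your device publishes:

import datetime

import paho.mqtt.client as mqtt
import peewee

db = peewee.MySQLDatabase('mqtt_history', user='user', password='pw', host='localhost')

class Message(peewee.Model):
    topic = peewee.CharField()
    payload = peewee.TextField()
    created = peewee.DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db

db.connect()
db.create_tables([Message])

def on_connect(client, userdata, flags, rc):
    client.subscribe('sensors/#')  # subscribe here so it survives reconnects

def on_message(client, userdata, msg):
    # One row per publication; called from paho's network thread
    Message.create(topic=msg.topic, payload=msg.payload.decode('utf-8', 'replace'))

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect('localhost', 1883, 60)
client.loop_forever()

Subscribing inside on_connect (rather than once at startup) means the subscription is re-established after every reconnect, which matters for a script that is supposed to sit in the background for a long time.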

Is there a tool to check database integrity in Django?

The MySQL database powering our Django site has developed some integrity problems; e.g. foreign keys that refer to nonexistent rows. I won't go into how we got into this mess, but I'm now looking at how to fix it.
Basically, I'm looking for a script that scans all models in the Django site, and checks whether all foreign keys and other constraints are correct. Hopefully, the number of problems will be small enough so they can be fixed by hand.
I could code this up myself but I'm hoping that somebody here has a better idea.
I found django-check-constraints but it doesn't quite fit the bill: right now, I don't need something to prevent these problems, but to find them so they can be fixed manually before taking other steps.
Other constraints:
Django 1.1.1; upgrading has been determined to break things
MySQL 5.0.51 (Debian Lenny), currently with MyISAM tables
Python 2.5; might be upgradable, but I'd rather not right now
(Later, we will convert to InnoDB for proper transaction support, and maybe foreign key constraints on the database level, to prevent similar problems in the future. But that's not the topic of this question.)
I whipped up something myself. The management script below should be saved in myapp/management/commands/checkdb.py. Make sure that intermediate directories have an __init__.py file.
Usage: ./manage.py checkdb for a full check; use --exclude app.Model or -e app.Model to exclude the model Model in the app app.
from django.core.management.base import BaseCommand, CommandError
from django.core.management.base import NoArgsCommand
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from optparse import make_option

from lib.progress import with_progress_meter

def model_name(model):
    return '%s.%s' % (model._meta.app_label, model._meta.object_name)

class Command(BaseCommand):
    args = '[-e|--exclude app_name.ModelName]'
    help = 'Checks constraints in the database and reports violations on stdout'

    option_list = NoArgsCommand.option_list + (
        make_option('-e', '--exclude', action='append', type='string', dest='exclude'),
    )

    def handle(self, *args, **options):
        # TODO once we're on Django 1.2, write to self.stdout and self.stderr instead of plain print
        exclude = options.get('exclude', None) or []

        failed_instance_count = 0
        failed_model_count = 0
        for app in models.get_apps():
            for model in models.get_models(app):
                if model_name(model) in exclude:
                    print 'Skipping model %s' % model_name(model)
                    continue
                fail_count = self.check_model(app, model)
                if fail_count > 0:
                    failed_model_count += 1
                    failed_instance_count += fail_count
        print 'Detected %d errors in %d models' % (failed_instance_count, failed_model_count)

    def check_model(self, app, model):
        meta = model._meta
        if meta.proxy:
            print 'WARNING: proxy models not currently supported; ignored'
            return

        # Define all the checks we can do; they return True if they are ok,
        # False if not (and print a message to stdout)
        def check_foreign_key(model, field):
            foreign_model = field.related.parent_model
            def check_instance(instance):
                try:
                    # name: name of the attribute containing the model instance (e.g. 'user')
                    # attname: name of the attribute containing the id (e.g. 'user_id')
                    getattr(instance, field.name)
                    return True
                except ObjectDoesNotExist:
                    print '%s with pk %s refers via field %s to nonexistent %s with pk %s' % \
                        (model_name(model), str(instance.pk), field.name, model_name(foreign_model), getattr(instance, field.attname))
            return check_instance

        # Make a list of checks to run on each model instance
        checks = []
        for field in meta.local_fields + meta.local_many_to_many + meta.virtual_fields:
            if isinstance(field, models.ForeignKey):
                checks.append(check_foreign_key(model, field))

        # Run all checks
        fail_count = 0
        if checks:
            for instance in with_progress_meter(model.objects.all(), model.objects.count(), 'Checking model %s ...' % model_name(model)):
                for check in checks:
                    if not check(instance):
                        fail_count += 1
        return fail_count
I'm making this a community wiki because I welcome any and all improvements to my code!
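One thing to note: the script imports with_progress_meter from a lib.progress module that isn't shown here, so you'll need your own. Any wrapper that yields the items while reporting progress will do; here is a minimal stand-in, assuming nothing beyond the call signature used above:

import sys

def with_progress_meter(iterable, count, caption):
    # Yield items unchanged, writing occasional progress to stderr
    sys.stderr.write(caption + '\n')
    for i, item in enumerate(iterable):
        if count and ((i + 1) % 100 == 0 or i + 1 == count):
            sys.stderr.write('\r%d/%d' % (i + 1, count))
        yield item
    sys.stderr.write('\n')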
Thomas' answer is great but is now a bit out of date.
I have updated it as a gist to support Django 1.8+.