SMTP STARTTLS format

Is the EHLO message required after the TLS connection has been established? I'm using an acorn ltl-6511M wildlife camera that doesn't seem to send an EHLO message after establishing the TLS connection, causing a 503 error in my aiosmtpd-based SMTP server. It works with gmail SMTP though. Is the camera following the protocol or is my server not robust enough?
The code I'm using is:
import email
from email.header import decode_header
from email import message_from_bytes
from email.policy import default
from aiosmtpd.controller import Controller
from aiosmtpd.smtp import LoginPassword, AuthResult
import os
import sys
import time
import signal
import logging
import ssl

## setting timezone
os.environ['TZ'] = "Europe/London"
time.tzset()

def onExit(sig, func=None):
    print("*************Stopping program*****************")
    controller.stop()
    exit()

signal.signal(signal.SIGTERM, onExit)

# removes the spaces and replaces with _ so they're valid folder names
def clean(text):
    return "".join(c if c.isalnum() else "_" for c in text)

log = logging.getLogger('mail.log')

auth_db = {
    b"TestCamera1#gmail.com": b"password1",
    b"user2": b"password2",
    b"TestCamera1": b"password1",
}

def authenticator_func(server, session, envelope, mechanism, auth_data):
    # this deliberately lets everything through
    assert isinstance(auth_data, LoginPassword)
    username = auth_data.login
    password = auth_data.password
    return AuthResult(success=True)

def configure_logging():
    file_handler = logging.FileHandler("aiosmtpd.log", "a")
    stderr_handler = logging.StreamHandler(sys.stderr)
    logger = logging.getLogger("mail.log")
    fmt = "[%(asctime)s %(levelname)s] %(message)s"
    datefmt = None
    formatter = logging.Formatter(fmt, datefmt, "%")
    stderr_handler.setFormatter(formatter)
    logger.addHandler(stderr_handler)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)

class CustomHandler:
    def handle_exception(self, error):
        print("exception occurred")
        print(error)
        return '542 Internal Server Error'

    async def handle_DATA(self, server, session, envelope):
        peer = session.peer
        data = envelope.content  # type: bytes
        msg = message_from_bytes(envelope.content, policy=default)
        # decode the email subject
        print("Msg:{}".format(msg))
        print("Data:{}".format(data))
        print("All of the relevant data has been extracted from the email")
        return '250 OK'

if __name__ == '__main__':
    configure_logging()
    handler = CustomHandler()
    # update hostname to your IP
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.load_cert_chain('cert.pem', 'key.pem')
    controller = Controller(handler, hostname='0.0.0.0', port=587,
                            authenticator=authenticator_func, auth_required=True,
                            auth_require_tls=True, tls_context=context)
    # Run the event loop in a separate thread.
    controller.start()
    while True:
        time.sleep(10)
The code after trying to integrate the suggested workaround is:
import email
from email.header import decode_header
from email import message_from_bytes
from email.policy import default
from aiosmtpd.controller import Controller
from aiosmtpd.smtp import LoginPassword, AuthResult, SMTP
import os
import json
import re
import sys
import time
import signal
import logging
import ssl
from datetime import datetime
import configparser

## setting timezone
os.environ['TZ'] = "Europe/London"
time.tzset()

spacer = "*" * 100

def onExit(sig, func=None):
    print("*************Stopping program*****************")
    controller.stop()
    exit()

signal.signal(signal.SIGTERM, onExit)

# removes the spaces and replaces with _ so they're valid folder names
def clean(text):
    return "".join(c if c.isalnum() else "_" for c in text)

log = logging.getLogger('mail.log')

auth_db = {
    b"TestCamera1#gmail.com": b"password1",
    b"user2": b"password2",
    b"TestCamera1": b"password1",
}

def authenticator_func(server, session, envelope, mechanism, auth_data):
    # Simple auth - is only being used because of the reolink cam
    assert isinstance(auth_data, LoginPassword)
    username = auth_data.login
    password = auth_data.password
    log.warning("Authenticator is being used")
    return AuthResult(success=True)

def configure_logging():
    file_handler = logging.FileHandler("aiosmtpd.log", "a")
    stderr_handler = logging.StreamHandler(sys.stderr)
    logger = logging.getLogger("mail.log")
    fmt = "[%(asctime)s %(levelname)s] %(message)s"
    datefmt = None
    formatter = logging.Formatter(fmt, datefmt, "%")
    stderr_handler.setFormatter(formatter)
    logger.addHandler(stderr_handler)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)

class SMTPNoEhloAfterStarttls(SMTP):
    async def smtp_STARTTLS(self, arg: str):
        print(spacer)
        print("using starttls")
        host_name = self.session.host_name
        extended_smtp = self.session.extended_smtp
        await super().smtp_STARTTLS(arg)
        if host_name and extended_smtp and not self.session.host_name:
            # There was an EHLO before the STARTTLS.
            # RFC 3207 says that we MUST reset the state
            # and forget the EHLO, but unfortunately
            # the client doesn't re-send the EHLO after STARTTLS,
            # so we need to pretend as if an EHLO has been sent.
            self.session.host_name = host_name
            self.session.extended_smtp = True

class ControllerNoEhloAfterStarttls(Controller):
    def factory(self):
        print(spacer)
        print("updating default settings")
        return SMTPNoEhloAfterStarttls(self.handler, **self.SMTP_kwargs)

class CustomHandler:
    def handle_exception(self, error):
        print("exception occurred")
        print(error)
        return '542 Internal Server Error'

    async def handle_DATA(self, server, session, envelope):
        peer = session.peer
        data = envelope.content  # type: bytes
        msg = message_from_bytes(envelope.content, policy=default)
        # decode the email subject
        print("Msg:{}".format(msg))
        print("Data:{}".format(data))
        print("All of the relevant data has been extracted from the email")
        print(spacer)
        return '250 OK'

if __name__ == '__main__':
    configure_logging()
    handler = CustomHandler()
    # controller = Controller(handler, hostname='10.200.68.132', port=587)
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.load_cert_chain('cert.pem', 'key.pem')
    controller = Controller(handler, hostname='10.200.68.133', port=587,
                            authenticator=authenticator_func, auth_required=True,
                            auth_require_tls=True, tls_context=context)
    # Run the event loop in a separate thread.
    controller.start()
    # Confirmed that this is needed to keep the SMTP server running constantly
    while True:
        time.sleep(10)
However, this hasn't made any difference to the error logs.

Yes, EHLO is required after STARTTLS, see RFC3207 Section 4.2 (which specifically mentions forgetting the EHLO line - emphasis mine):
Upon completion of the TLS handshake, the SMTP protocol is reset to
the initial state (the state in SMTP after a server issues a 220
service ready greeting). The server MUST discard any knowledge
obtained from the client, such as the argument to the EHLO command,
which was not obtained from the TLS negotiation itself.
This means that unfortunately your camera is not following the SMTP protocol. It is also unfortunate that GMail SMTP does not enforce the protocol here (it doesn't require an EHLO in between STARTTLS and AUTH LOGIN).
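For comparison, a conforming client re-issues EHLO once the TLS channel is up; here is a minimal smtplib sketch (hostname and credentials are placeholders):

import smtplib

client = smtplib.SMTP('mail.example.com', 587)  # placeholder host
client.ehlo()      # first EHLO, in the clear
client.starttls()  # TLS handshake; smtplib resets its ESMTP state here
client.ehlo()      # second EHLO, as RFC 3207 requires
client.login('TestCamera1', 'password1')
client.quit()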
aiosmtpd is quite insistent on following the SMTP protocol and duly forgets the EHLO data sent before the STARTTLS; the EHLO hostname is stored in self.session.host_name on the aiosmtpd.smtp.SMTP object, and self.session is reset in SMTP.connection_made(), which is invoked again after the TLS handshake.
It is possible to make aiosmtpd break the SMTP specification and act in a highly non-conforming way. Obviously this is something you MUST NOT do in production. Use the ControllerNoEhloAfterStarttls defined below instead of the standard aiosmtpd Controller and then it should work.
from aiosmtpd.smtp import SMTP
from aiosmtpd.controller import Controller

class SMTPNoEhloAfterStarttls(SMTP):
    async def smtp_STARTTLS(self, arg: str):
        host_name = self.session.host_name
        extended_smtp = self.session.extended_smtp
        await super().smtp_STARTTLS(arg)
        if host_name and extended_smtp and not self.session.host_name:
            # There was an EHLO before the STARTTLS.
            # RFC 3207 says that we MUST reset the state
            # and forget the EHLO, but unfortunately
            # the client doesn't re-send the EHLO after STARTTLS,
            # so we need to pretend as if an EHLO has been sent.
            self.session.host_name = host_name
            self.session.extended_smtp = True

class ControllerNoEhloAfterStarttls(Controller):
    def factory(self):
        return SMTPNoEhloAfterStarttls(self.handler, **self.SMTP_kwargs)
...and then down in if __name__ == "__main__":, instantiate the custom controller class instead of the default Controller:
controller = ControllerNoEhloAfterStarttls(handler, hostname='10.200.68.133', port=587, ......)
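To check the workaround from the client side, here is a quick probe that mimics the camera by deliberately skipping the second EHLO (host, port and username are taken from the question; verification is disabled because of the self-signed cert.pem):

import base64
import smtplib
import ssl

# Skip certificate verification for the self-signed cert.pem
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Deliberately non-conforming probe: no second EHLO between STARTTLS and AUTH.
s = smtplib.SMTP('10.200.68.133', 587)
s.ehlo()
s.starttls(context=ctx)  # smtplib resets its ESMTP state here, as the RFC requires
code, resp = s.docmd('AUTH', 'LOGIN ' + base64.b64encode(b'TestCamera1').decode())
print(code, resp)  # the stock controller rejects this (cf. the 503 in the question);
                   # the patched controller carries on with the AUTH dialogue
s.close()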

Related

Object of type ndarray is not JSON serializable

I am receiving data over TCP and trying to publish it in std_msgs/Float64MultiArray format; however, when I try to convert the JSON data to a NumPy array, I get the following error:
File "/usr/lib/python3.8/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type Int32MultiArray is not JSON serializable
Code for receiving data from TCP and sending it to rosbridge (topic /chatter):
import roslibpy
import socket
import time
import struct
import numpy as np
import json
from rospy.numpy_msg import numpy_msg
from rospy_tutorials.msg import Floats
from std_msgs.msg import String, Int32, Int32MultiArray, MultiArrayLayout, MultiArrayDimension, Float64MultiArray

# ROS Python Bridge
client = roslibpy.Ros(host='localhost', port=9090)  # same as rosbridge port
client.run()
print("Is ROS connected? ", client.is_connected)
talker = roslibpy.Topic(client, '/chatter', 'std_msgs/Float64MultiArray')
data_to_send = Float64MultiArray()  # the data to be sent, initialise the array

HOST = "0.0.0.0"  # Listen on all interfaces
PORT = 8081       # Port to listen on (non-privileged ports are > 1023)

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    while True:
        s.listen()
        conn, addr = s.accept()
        with conn:
            print(f"Connected by {addr}")
            while client.is_connected:
                data = conn.recv(1024)
                if not data:
                    break
                conn.sendall(data)
                data = json.loads(data)
                data_list = data[0]
                x_val = data_list['x']
                y_val = data_list['y']
                z_val = data_list['z']
                pos_arry = np.unique([x_val, y_val, z_val])
                pos_arry = pos_arry.tolist()
                my_array_for_publishing = Int32MultiArray(data=pos_arry)
                print(type(pos_arry))
                talker.publish(roslibpy.Message({'data': my_array_for_publishing}))
                print('Sending message...')

talker.unadvertise()
client.terminate()
This error can be resolved by sending the data as a plain list:

pos_arry = np.unique([x_val, y_val, z_val])
pos_arry = pos_arry.tolist()

and retrieving it on the receiving side using .at, for example (C++):

std_msgs::Float64MultiArray val = listener.data;
std::cout << val.data.at(0) << std::endl;
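The underlying problem is that json (and roslibpy, which serializes messages to JSON) only understands built-in types; a NumPy array or a rospy message object is not serializable. A minimal illustration:

import json
import numpy as np

pos_arry = np.unique([3, 1, 2])
# json.dumps(pos_arry) raises: TypeError: Object of type ndarray is not JSON serializable
print(json.dumps(pos_arry.tolist()))  # works: [1, 2, 3]

For the same reason, with roslibpy the plain list itself should go into the message, e.g. talker.publish(roslibpy.Message({'data': pos_arry})) with pos_arry already converted via .tolist().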

AWS Lambda verify requests from Slack

I have the following Python code in AWS Lambda to verify if an event received is indeed from Slack:
import hmac
import json
import os

def verifySignature(header, body):
    h = hmac.new(key=os.getenv('sign_secret').encode(),
                 msg=f'v0:{header.get("X-Slack-Request-Timestamp")}:{body}'.encode(),
                 digestmod="sha256")
    result = hmac.compare_digest('v0=' + h.hexdigest(), header.get('X-Slack-Signature'))
    print('v0=' + h.hexdigest(), header.get('X-Slack-Signature'))
    return result

def lambda_handler(event, context):
    body = json.loads(event.get('body'))
    if verifySignature(event.get('headers'), body):
        do_something()
Slack's authentication protocol is outlined here. However, I keep getting mismatching signatures (result == False). Does anyone know why?
There is a high chance the issue comes from the encoding/decoding: the signature must be computed over the raw request body string, not over the dict you get back from json.loads. There is a pip package to verify the Slack signature, but the verification code is simple:
import hashlib
import hmac

def verify_slack_signature(slack_post_request, slack_signing_secret):
    slack_signing_secret = bytes(slack_signing_secret, 'utf-8')
    slack_signature = slack_post_request['headers']['X-Slack-Signature']
    slack_request_timestamp = slack_post_request['headers']['X-Slack-Request-Timestamp']
    request_body = slack_post_request["body"]
    basestring = f"v0:{slack_request_timestamp}:{request_body}".encode('utf-8')
    my_signature = 'v0=' + hmac.new(slack_signing_secret, basestring, hashlib.sha256).hexdigest()
    return hmac.compare_digest(my_signature, slack_signature)
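A minimal usage sketch inside a Lambda handler (the sign_secret environment variable name is carried over from the question):

import os

def lambda_handler(event, context):
    # Pass the whole event: the verifier reads the headers and the *raw* body from it.
    if not verify_slack_signature(event, os.environ['sign_secret']):
        return {'statusCode': 401, 'body': 'invalid signature'}
    # ... handle the verified Slack event here ...
    return {'statusCode': 200, 'body': 'ok'}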

Python script DB connection as Pool not working, but simple connection is working

I am writing a script in Python 3 that listens to a queue and saves or updates data in MySQL depending on the message received.
I ran into weird behavior: I made a simple connection to MySQL using the pymysql module and everything worked fine, but after some time this simple connection closes.
So I decided to implement a connection pool to MySQL, and here the problem arises. No errors are raised, but the issue is the following:
My cursor = yield self._pool.execute(query, list(filters.values())) call just returns a tornado_mysql.pools.Pool object at 0x0000019DE5D71F98 and gets stuck there, not doing anything more.
If I remove the yield, that line passes and the next line throws an error:
response = yield c.fetchall()
AttributeError: 'Future' object has no attribute 'fetchall'
How can I fix the MySQL pool connection so it works properly?
What I tried:
- I used a few modules for the pool connection; all ran into the same issue
- Went back to a simple connection with pymysql and it worked again
Below is my code:
The Python script file:
import pika
from tornado.gen import coroutine
from model import SyncModel

_model = SyncModel(conf, _server_id)

@coroutine
def main():
    credentials = pika.PlainCredentials('user', 'password')
    try:
        cp = pika.ConnectionParameters(
            host='127.0.0.1',
            port=5671,
            credentials=credentials,
            ssl=False,
        )
        connection = pika.BlockingConnection(cp)
        channel = connection.channel()

        @coroutine
        def callback(ch, method, properties, body):
            if 'messageType' in properties.headers:
                message_type = properties.headers['messageType']
                if message_type in allowed_message_types:
                    result = ptoto_file._reflection.ParseMessage(descriptors[message_type], body)
                    if result:
                        result = protobuf_to_dict(result)
                        if message_type == 'MyMessage':
                            yield _model.message_event(data=result)
                else:
                    print('Message type not in allowed list = ' + str(message_type))
                    print('continue listening...')

        channel.basic_consume(callback, queue='queue', no_ack=True)
        print(' [*] Waiting for messages. To exit press CTRL+C')
        channel.start_consuming()
    except Exception as e:
        print('Could not connect to host 127.0.0.1 on port 5671')
        print(str(e))

if __name__ == '__main__':
    main()
SyncModel
from tornado_mysql import pools
from tornado.gen import coroutine, Return
from tornado_mysql.cursors import DictCursor

class SyncModel(object):
    def __init__(self, conf, server_id):
        self.conf = conf
        servers = [i for i in conf.mysql.servers]
        for s in servers:
            if s['server_id'] == server_id:
                # s holds all data: host, user, port, autocommit, charset, db, password
                s['cursorclass'] = DictCursor
                self._pool = pools.Pool(s, max_idle_connections=1, max_recycle_sec=3)

    @coroutine
    def message_event(self, data):
        table_name = 'table_name'
        query = ''
        data = data['message']
        filters = {
            'id': data['id']
        }
        # here the connection fails as described above
        response = yield self.query_select(table_name, self._pool, filters=filters)

    @coroutine
    def query_select(self, table_name, _pool, filters=None):
        if filters is None:
            filters = {}
        combined_filters = ['`%s` = %%s' % i for i in filters.keys()]
        where = 'WHERE ' + ' AND '.join(combined_filters) if combined_filters else ''
        query = """SELECT * FROM `%s` %s""" % (table_name, where)
        c = self._pool.execute(query, list(filters.values()))
        response = yield c.fetchall()
        raise Return({response})
All the code was working with just a simple connection to the database; after I started to use the pool, the example stopped working. I would appreciate any help with this issue.
This is a standalone script.
The pool connection was not working, so I switched back to pymysql with double-checking of the connection. I would like to post the answer that worked for me: before talking to MySQL, check whether the connection is open, and if not, reconnect.

if not self.mysql.open:
    self.mysql.ping(reconnect=True)
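A fuller sketch of that pattern, assuming self.mysql is a plain pymysql connection (the class and method names here are illustrative):

import pymysql

class Db:
    def __init__(self, **conn_kwargs):
        # Plain pymysql connection; conn_kwargs holds host, user, password, db, ...
        self.mysql = pymysql.connect(**conn_kwargs)

    def ensure_connected(self):
        # pymysql sets Connection.open to False once the server drops the link;
        # ping(reconnect=True) transparently re-establishes it.
        if not self.mysql.open:
            self.mysql.ping(reconnect=True)

    def query(self, sql, args=None):
        self.ensure_connected()
        with self.mysql.cursor() as cur:
            cur.execute(sql, args)
            return cur.fetchall()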

Python 3.6 asyncio send json message error

I'm trying to set up a TCP echo client and server that can exchange messages using the JSON format.
I took the code from the documentation and modified it as follows:
Edit: includes the fix, and both server and client now send JSON-style messages.
import asyncio
# https://docs.python.org/3/library/asyncio-stream.html
import json

async def handle_echo(reader, writer):
    data = await reader.read(100)
    message = json.loads(data.decode())
    addr = writer.get_extra_info('peername')
    print("Received %r from %r" % (message, addr))

    print("Send: %r" % json.dumps(message))  # message
    json_mess_en = json.dumps(message).encode()
    writer.write(json_mess_en)
    #writer.write(json_mess)  # not working
    #writer.write(json.dumps(json_mess))  # not working

    # Yielding from drain() gives the opportunity for the loop to schedule the write operation
    # and flush the buffer. It should especially be used when a possibly large amount of data
    # is written to the transport, and the coroutine does not yield-from between calls to write().
    #await writer.drain()

    #print("Close the client socket")
    writer.close()

loop = asyncio.get_event_loop()
coro = asyncio.start_server(handle_echo, '0.0.0.0', 9090, loop=loop)
server = loop.run_until_complete(coro)

# Serve requests until Ctrl+C is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
    loop.run_forever()
except KeyboardInterrupt:
    pass

# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
and the client code:
import asyncio
import json

async def tcp_echo_client(message, loop):
    reader, writer = await asyncio.open_connection('0.0.0.0', 9090, loop=loop)
    print('Send: %r' % message)
    writer.write(json.dumps(message).encode())

    data = await reader.read(100)
    data_json = json.loads(data.decode())
    print('Received: %r' % data_json)
    print(data_json['welcome'])

    print('Close the socket')
    writer.close()

message = {'welcome': 'Hello World!'}
loop = asyncio.get_event_loop()
loop.run_until_complete(tcp_echo_client(message, loop))
loop.close()
The error:
TypeError: data argument must be a bytes-like object, not 'str'
Should I use a function other than writer.write to encode the JSON? Any suggestions?
Found the solution: replace

writer.write(json.dumps(json_mess))

with

# encode as 'UTF8'
json_mess_en = json.dumps(json_mess).encode()
writer.write(json_mess_en)
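In short, StreamWriter.write() accepts only bytes, so the JSON has to be encoded on the way out and decoded on the way in; a minimal round-trip sketch:

import json

def to_wire(obj) -> bytes:
    # serialize, then encode to bytes for StreamWriter.write()
    return json.dumps(obj).encode('utf-8')

def from_wire(data: bytes):
    # decode bytes from StreamReader.read(), then parse the JSON
    return json.loads(data.decode('utf-8'))

assert from_wire(to_wire({'welcome': 'Hello World!'})) == {'welcome': 'Hello World!'}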

AWS Boto3 and Classic ELBs

I'm trying to get the active TLS policy on a classic load balancer (elb, not elbv2) and I'm having trouble identifying what is going wrong here:
import boto3
from botocore.exceptions import ClientError

# Declare constants
EXPECTED_POLICY = 'ELBSecurityPolicy-TLS-1-1-2017-01'
IAMID = '518031149234'

def set_session(awsprofile, awsregion):
    try:
        session = boto3.Session(profile_name=awsprofile, region_name=awsregion)
        return session
    except ClientError as e:
        print("Failed to run session setter for profile: {0} {1}".format(awsprofile, e))

def assume_role_into_account(profileId, assumeId, sessionName, assetType, regionName):
    try:
        setSession = set_session(profileId, regionName)
        stsSession = setSession.client('sts')
        response = stsSession.assume_role(RoleArn="arn:aws:iam::{0}:role/security".format(assumeId),
                                          RoleSessionName=sessionName)
        credentials = response['Credentials']
        session = setSession.client(assetType,
                                    aws_access_key_id=credentials['AccessKeyId'],
                                    aws_secret_access_key=credentials['SecretAccessKey'],
                                    aws_session_token=credentials['SessionToken'])
        return session
    except ClientError as e:
        print("AssumeRole exception for profile: {0} {1}".format(profileId, e))

def main():
    try:
        srev2 = assume_role_into_account('sre', IAMID, 'Security-Audit-AssumeRole-Session2', 'elb', 'us-east-1')
        print("AssumeRole into Account: {0} for Region: {1}.".format(IAMID, 'us-east-1'))
        elbs = srev2.describe_load_balancers()
        for elb in elbs:
            policy = session.describe_load_balancer_policies(LoadBalancerName=elb)
    except ClientError as e:
        print("AssumeRole: Cannot assumerole for id: {0}. {1}".format(IAMID, e))

if __name__ == '__main__':
    main()
So when policy comes back from describe_load_balancer_policies(), there is no way to distinguish which policy is actually active.
Any help?
TIA!
It is hard to help if you don't paste the related error message.
From a quick look, I guess you define the local variable session in assume_role_into_account, which can't be accessed in main().
If this is the problem, you can change it to:

def assume_role_into_account(profileId, assumeId, sessionName, assetType, regionName):
    global session
    ....
Refer:
Python - Global, Local and nonlocal Variables
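A minimal illustration of that scoping point (names are hypothetical):

def make_client():
    global session           # without this, 'session' would stay local to make_client
    session = 'the client'   # stands in for the boto3 client object

def main():
    make_client()
    print(session)           # visible here only because of the global declaration

main()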
Ok, after a long discussion with the API and ELB team folks at Amazon... here is what we came up with; note this is only for classic ELBs. This will indeed return the ELB policy you see in the AWS Web Console, every time.
I spent a lot of time on this and I hope it benefits someone else that has also looked into this time-suck, near-fruitless endeavor:
import jmespath

def get_console_policy(client):
    # client is the classic-ELB boto3 client, e.g. the assume-role session from above
    elbs = client.describe_load_balancers()
    for elb in elbs['LoadBalancerDescriptions']:
        # Get the named policy to pass to the active-policy lookup. -1 denotes the last in the list.
        policy_name = jmespath.search('ListenerDescriptions[].PolicyNames[] | [-1]', elb)
        policy_description = client.describe_load_balancer_policies(
            LoadBalancerName=elb['LoadBalancerName'], PolicyNames=[policy_name])
        console_policy = jmespath.search(
            'PolicyDescriptions[?PolicyName==`{0}`] | [0].PolicyAttributeDescriptions[0].AttributeValue'.format(policy_name),
            policy_description)
        return console_policy
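Called with the assumed-role ELB client from the question, e.g. print(get_console_policy(srev2)), this prints the policy shown in the console. The function wrapper and its name are just illustrative packaging; the original snippet's return statement implies it lived inside one.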