Is the EHLO message required after the TLS connection has been established? I'm using an acorn ltl-6511M wildlife camera that doesn't seem to send an EHLO message after establishing the TLS connection, causing a 503 error in my aiosmtpd-based SMTP server. It works with Gmail SMTP, though. Is the camera following the protocol, or is my server not robust enough?
The code I'm using is:
import email
from email.header import decode_header
from email import message_from_bytes
from email.policy import default
from aiosmtpd.controller import Controller
from aiosmtpd.smtp import LoginPassword, AuthResult
import os
import sys
import time
import signal
import logging
import ssl
##setting timezone
os.environ['TZ'] = "Europe/London"
time.tzset()
def onExit( sig, func=None):
print("*************Stopping program*****************")
controller.stop()
exit()
signal.signal(signal.SIGTERM, onExit)
# removes the spaces and replaces with _ so they're valid folder names
def clean(text):
return "".join(c if c.isalnum() else "_" for c in text)
log = logging.getLogger('mail.log')
auth_db = {
b"TestCamera1#gmail.com": b"password1",
b"user2": b"password2",
b"TestCamera1": b"password1",
}
def authenticator_func(server, session, envelope, mechanism, auth_data):
#this deliberately lets everything through
assert isinstance(auth_data, LoginPassword)
username = auth_data.login
password = auth_data.password
return AuthResult(success=True)
def configure_logging():
file_handler = logging.FileHandler("aiosmtpd.log", "a")
stderr_handler = logging.StreamHandler(sys.stderr)
logger = logging.getLogger("mail.log")
fmt = "[%(asctime)s %(levelname)s] %(message)s"
datefmt = None
formatter = logging.Formatter(fmt, datefmt, "%")
stderr_handler.setFormatter(formatter)
logger.addHandler(stderr_handler)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(logging.DEBUG)
class CustomHandler:
def handle_exception(self, error):
print("exception occured")
print(error)
return '542 Internal Server Error'
async def handle_DATA(self, server, session, envelope):
peer = session.peer
data = envelope.content # type: bytes
msg = message_from_bytes(envelope.content, policy=default)
# decode the email subject
print("Msg:{}".format(msg))
print("Data:{}".format(data))
print("All of the relevant data has been extracted from the email")
return '250 OK'
if __name__ == '__main__':
configure_logging()
handler = CustomHandler()
#update hostname to your IP
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain('cert.pem', 'key.pem')
controller = Controller(handler, hostname='0.0.0.0', port=587, authenticator=authenticator_func, auth_required=True,auth_require_tls=True,tls_context=context)
# Run the event loop in a separate thread.
controller.start()
while True:
time.sleep(10)
The code after trying to integrate the suggested workaround is:
import email
from email.header import decode_header
from email import message_from_bytes
from email.policy import default
from aiosmtpd.controller import Controller
from aiosmtpd.smtp import LoginPassword, AuthResult, SMTP
import os
import json
import re
import sys
import time
import signal
import logging
import ssl
from datetime import datetime
import configparser
##setting timezone
os.environ['TZ'] = "Europe/London"
time.tzset()
spacer = "*"*100
def onExit( sig, func=None):
print("*************Stopping program*****************",3)
controller.stop()
exit()
signal.signal(signal.SIGTERM, onExit)
# removes the spaces and replaces with _ so they're valid folder names
def clean(text):
return "".join(c if c.isalnum() else "_" for c in text)
log = logging.getLogger('mail.log')
auth_db = {
b"TestCamera1#gmail.com": b"password1",
b"user2": b"password2",
b"TestCamera1": b"password1",
}
def authenticator_func(server, session, envelope, mechanism, auth_data):
# Simple auth - is only being used because of the reolink cam
assert isinstance(auth_data, LoginPassword)
username = auth_data.login
password = auth_data.password
log.warning("Authenticator is being used")
return AuthResult(success=True)
def configure_logging():
file_handler = logging.FileHandler("aiosmtpd.log", "a")
stderr_handler = logging.StreamHandler(sys.stderr)
logger = logging.getLogger("mail.log")
fmt = "[%(asctime)s %(levelname)s] %(message)s"
datefmt = None
formatter = logging.Formatter(fmt, datefmt, "%")
stderr_handler.setFormatter(formatter)
logger.addHandler(stderr_handler)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(logging.DEBUG)
class SMTPNoEhloAfterStarttls(SMTP):
async def smtp_STARTTLS(self, arg: str):
print(spacer)
print("using starttls")
host_name = self.session.host_name
extended_smtp = self.session.extended_smtp
await super().smtp_STARTTLS(arg)
if host_name and extended_smtp and not self.session.host_name:
# There was an EHLO before the STARTTLS.
# RFC3207 says that we MUST reset the state
# and forget the EHLO, but unfortunately
# the client doesn't re-send the EHLO after STARTTLS,
# so we need to pretend as if an EHLO has been sent.
self.session.host_name = host_name
self.session.extended_smtp = True
class ControllerNoEhloAfterStarttls(Controller):
def factory(self):
print(spacer)
print("updating default settings")
return SMTPNoEhloAfterStarttls(self.handler, **self.SMTP_kwargs)
class CustomHandler:
def handle_exception(self, error):
print("exception occured",3)
print(error)
return '542 Internal Server Error'
async def handle_DATA(self, server, session, envelope):
peer = session.peer
data = envelope.content # type: bytes
msg = message_from_bytes(envelope.content, policy=default)
# decode the email subject
print("Msg:{}".format(msg),3)
print("Data:{}".format(data),3)
print("All of the relevant data has been extracted from the email",3)
print(spacer,3)
return '250 OK'
if __name__ == '__main__':
configure_logging()
handler = CustomHandler()
# controller = Controller(handler, hostname='10.200.68.132', port=587)
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain('cert.pem', 'key.pem')
controller = Controller(handler, hostname='10.200.68.133', port=587, authenticator=authenticator_func, auth_required=True,auth_require_tls=True,tls_context=context)
# Run the event loop in a separate thread.
controller.start()
#Confirmed that this is needed to keep the SMTP server running constantly
while True:
time.sleep(10)
However, this hasn't made any difference to the error logs.
Yes, EHLO is required after STARTTLS, see RFC3207 Section 4.2 (which specifically mentions forgetting the EHLO line - emphasis mine):
Upon completion of the TLS handshake, the SMTP protocol is reset to
the initial state (the state in SMTP after a server issues a 220
service ready greeting). The server MUST discard any knowledge
obtained from the client, such as the argument to the EHLO command,
which was not obtained from the TLS negotiation itself.
This means that, unfortunately, your camera is not following the SMTP protocol. It is also unfortunate that Gmail's SMTP server does not enforce the protocol (it doesn't require an EHLO in between STARTTLS and AUTH LOGIN), which is why the camera happens to work with it.
aiosmtpd is quite insistent on following the SMTP protocol and duly forgets the EHLO data sent before STARTTLS; the EHLO hostname is stored in self.session.host_name on the aiosmtpd.smtp.SMTP object, and self.session is reset in SMTP.connection_made(), which is invoked after STARTTLS.
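For comparison, here is what a conforming client does after the handshake; a minimal sketch using Python's smtplib (host, port, hostname and credentials are placeholders taken from the question):
import smtplib

client = smtplib.SMTP('10.200.68.133', 587)  # placeholder host/port
client.ehlo('camera.local')  # first EHLO, in plain text
client.starttls()            # TLS handshake; the server now forgets the EHLO state
client.ehlo('camera.local')  # second EHLO, as RFC 3207 Section 4.2 requires
client.login('TestCamera1', 'password1')     # AUTH is only attempted after the fresh EHLO
client.quit()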
It is possible to make aiosmtpd break the SMTP specification and act in a highly non-conforming way. Obviously this is something you MUST NOT do in production. Use the ControllerNoEhloAfterStarttls defined below instead of the standard aiosmtpd Controller and then it should work.
from aiosmtpd.smtp import SMTP
from aiosmtpd.controller import Controller
class SMTPNoEhloAfterStarttls(SMTP):
async def smtp_STARTTLS(self, arg: str):
host_name = self.session.host_name
extended_smtp = self.session.extended_smtp
await super().smtp_STARTTLS(arg)
if host_name and extended_smtp and not self.session.host_name:
# There was an EHLO before the STARTTLS.
# RFC3207 says that we MUST reset the state
# and forget the EHLO, but unfortunately
# the client doesn't re-send the EHLO after STARTTLS,
# so we need to pretend as if an EHLO has been sent.
self.session.host_name = host_name
self.session.extended_smtp = True
class ControllerNoEhloAfterStarttls(Controller):
def factory(self):
return SMTPNoEhloAfterStarttls(self.handler, **self.SMTP_kwargs)
...and then, down in the if __name__ == "__main__": block, instantiate the custom controller class instead of the default Controller:
controller = ControllerNoEhloAfterStarttls(handler, hostname='10.200.68.133', port=587, ......)
I am working on an autonomous car implementation for a web browser game with Python 2.x. I use the Tornado web server to run the game on localhost, and I post and receive JSON data from the game in the function called "FrameHandler"; I also determine what the car's action should be in the "to_dict_faster()" function.
My problem is that I can write the data held in the speed_data variable to a text file at a specific time interval with the help of a coroutine. However, I can't dump JSON data back to the game at that same interval, because "FrameHandler" acts like a while True loop and always requests data to dump. What I am trying to do is send the desired actions at a specific time interval, just as I write the text file, without changing the flow of the frame handler, because that affects the FPS of the game.
I have been trying to figure out how to do this for a long time; any help would be great. Here is my code:
@gen.coroutine
def sampler():
io_loop = tornado.ioloop.IOLoop.current()
start = time.time()
while True:
with open("Sampled_Speed.txt", "a") as text_file:
text_file.write("%d,%.2f\n" % (speed_data, ((time.time() - start))))
yield gen.Task(io_loop.add_timeout, io_loop.time() + period)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.redirect("/static/v2.curves.html")
class FrameHandler(tornado.web.RequestHandler):
def post(self):
global speed_data
data = json.loads(self.get_arguments("telemetry")[0])
ar = np.fromstring(base64.decodestring(self.request.body), dtype=np.uint8)
image = ar.reshape(hp.INPUT_SIZE, hp.INPUT_SIZE, hp.NUM_CHANNELS)
left, right, faster, slower = data["action"]
terminal, action, all_data, was_start = (
data["terminal"],
Action(left=left, right=right, faster=faster, slower=slower),
data["all_data"],
data["was_start"]
)
for i in range(len(all_data)):
data_dict=all_data[i]
speed_data = data_dict[u'speed']
position_data=data_dict[u'position']
result_action = agent.steps(image, 0.1, terminal, was_start, action, all_data)
if speed_data < 4000:
self.write(json.dumps(result_action.to_dict_faster()))
else:
self.write(json.dumps(result_action.to_dict_constant()))
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
(r"/frame", FrameHandler),
(r"/static/(.*)", tornado.web.StaticFileHandler, {"path": static_path})
], debug=True)
if __name__ == "__main__":
app = make_app()
if "SERVER_PORT" in os.environ:
port = int(os.environ["SERVER_PORT"])
else:
port = 8880
print "LISTENING ON PORT: %d" % port
app.listen(port)
tornado.ioloop.IOLoop.current().run_sync(sampler)
tornado.ioloop.IOLoop.current().start()
You can move the file writing to a different thread (using Tornado's run_on_executor, for example), so the Python interpreter will automatically switch from the sampler to the main thread with FrameHandler on each write. But you then have to make speed_data thread-safe; I've used the stdlib Queue.Queue as an example:
import concurrent.futures
import Queue  # Python 2 stdlib queue (named `queue` on Python 3)
import tornado.web
from tornado import gen
from tornado.concurrent import run_on_executor
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application
class Handler(tornado.web.RequestHandler):
@gen.coroutine
def get(self):
global speed_data
speed_data.put("REALLY BIG TEST DATA\n")
self.finish("OK")
class Sampler():
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
def __init__(self, queue):
self._q = queue
@run_on_executor
def write_sample(self):
with open("foobar.txt", "w") as f:
while True:
data = self._q.get()
f.write(data)
if __name__ == '__main__':
application = Application(
[("/status", Handler)]
)
server = HTTPServer(application)
server.listen(8888)
speed_data = Queue.Queue()
smp = Sampler(speed_data)
IOLoop.current().add_callback(smp.write_sample)
IOLoop.current().start()
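If the write itself is cheap, an alternative that avoids threads entirely is tornado.ioloop.PeriodicCallback; a minimal sketch (the 100 ms period is an assumed value, and speed_data is the module-level global from the question):
import time
import tornado.ioloop

start = time.time()

def write_sample():
    # speed_data is updated by FrameHandler on the IOLoop thread,
    # so no locking is needed here
    with open("Sampled_Speed.txt", "a") as text_file:
        text_file.write("%d,%.2f\n" % (speed_data, time.time() - start))

# the callback period is given in milliseconds
tornado.ioloop.PeriodicCallback(write_sample, 100).start()
tornado.ioloop.IOLoop.current().start()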
Is there any way to compile and deploy a Viper smart contract automatically to some custom chain (not the tester chain from ethereum.tools)?
According to the GitHub issue and two posts that I found (this one and that one), the best option is to compile the contract and then insert it into geth manually.
Can anyone share their solutions?
As mentioned in the GitHub issue you provided, you can achieve it by using the web3.py library and the Viper library itself.
Here is an example of a script which probably covers your needs:
from web3 import Web3, HTTPProvider
from viper import compiler
from web3.contract import ConciseContract
from time import sleep
example_contract = open('./path/to/contract.v.py', 'r')
contract_code = example_contract.read()
example_contract.close()
cmp = compiler.Compiler()
contract_bytecode = cmp.compile(contract_code).hex()
contract_abi = cmp.mk_full_signature(contract_code)
web3 = Web3(HTTPProvider('http://localhost:8545'))
web3.personal.unlockAccount('account_addr', 'account_pwd', 120)
# Instantiate and deploy contract
contract_factory = web3.eth.contract(contract_abi, bytecode=contract_bytecode)
# Get transaction hash from the deployment transaction
tx_hash = contract_factory.deploy(transaction={'from': 'account_addr', 'gas': 410000})
# Waiting for contract to be deployed
i = 0
while i < 5:
try:
# Get tx receipt to get contract address
tx_receipt = web3.eth.getTransactionReceipt(tx_hash)
contract_address = tx_receipt['contractAddress']
break # if success, then exit the loop
except Exception:
print("Reading failure for {} time(s)".format(i + 1))
sleep(5+i)
i = i + 1
if i >= 5:
raise Exception("Cannot wait for contract to be deployed")
# Contract instance in concise mode
contract_instance = web3.eth.contract(contract_abi, contract_address, ContractFactoryClass=ConciseContract)
# Calling contract method
print('Contract value: {}'.format(contract_instance.some_method()))
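Depending on your web3.py version, the manual polling loop above can likely be replaced with the built-in receipt helper; a sketch (waitForTransactionReceipt is the camelCase name from v4-era web3.py, matching the API style used above):
# Wait up to 120 seconds for the deployment transaction to be mined
tx_receipt = web3.eth.waitForTransactionReceipt(tx_hash, timeout=120)
contract_address = tx_receipt['contractAddress']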
I'm trying to get the active TLS policy on a classic load balancer (elb, not elbv2) and I'm having trouble identifying what is going wrong here:
import boto3
from botocore.exceptions import ClientError
#Declare Constant
EXPECTED_POLICY = 'ELBSecurityPolicy-TLS-1-1-2017-01'
IAMID = '518031149234'
def set_session(awsprofile, awsregion):
try:
session = boto3.Session(profile_name=awsprofile, region_name=awsregion)
return session
except ClientError as e:
print("Failed to run session setter for profile: {0} %s" % e).format(awsprofile)
def assume_role_into_account(profileId, assumeId, sessionName, assetType, regionName):
try:
setSession = set_session(profileId, regionName)
stsSession = setSession.client('sts')
response = stsSession.assume_role(RoleArn=("arn:aws:iam::{0}:role/security").format(assumeId),RoleSessionName=sessionName)
credentials = response['Credentials']
session = setSession.client(assetType, aws_access_key_id=credentials['AccessKeyId'],aws_secret_access_key=credentials['SecretAccessKey'],aws_session_token=credentials['SessionToken'])
return session
except ClientError as e:
print("AssumeRole exception for profile: {0} %s" % e).format(profileId)
def main():
try:
srev2 = assume_role_into_account('sre', IAMID,'Security-Audit-AssumeRole-Session2', 'elb', 'us-east-1')
print("AssumeRole into Account: {0} for Region: {1} .").format(IAMID, 'us-east-1')
elbs = srev2.describe_load_balancers()
for elb in elbs:
policy = session.describe_load_balancer_policies(LoadBalancerName=elb)
except ClientError as e:
print("AssumeRole: Cannot assumerole for id: {0}." % e).format(IAMID)
if __name__ == '__main__':
main()
So when I look at the policy returned by describe_load_balancer_policies(), there is no way to distinguish which policy is selected.
Any help?
TIA!
It is hard to help if you don't paste the related error message.
At a quick glance, I guess you have defined the local variable session in assume_role_into_account, which can't be accessed in main().
If this is the problem, you can change it to
def assume_role_into_account(profileId, assumeId, sessionName, assetType, regionName):
global session
....
Refer:
Python - Global, Local and nonlocal Variables
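Alternatively, since assume_role_into_account() already returns the client and main() stores it in srev2, the stray session name can simply be replaced with the returned object; a sketch based on the question's own code:
# inside main(), after srev2 = assume_role_into_account(...)
elbs = srev2.describe_load_balancers()
for elb in elbs['LoadBalancerDescriptions']:
    policy = srev2.describe_load_balancer_policies(
        LoadBalancerName=elb['LoadBalancerName'])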
OK, after a long discussion with the API and ELB team folks at Amazon, here is what we came up with; note this is only for classic ELBs. This will indeed return the ELB policy you see in the AWS web console, every time.
I spent a lot of time on this and I hope it benefits someone else who has also looked into this time-sucking, near-fruitless endeavor:
import boto3
import jmespath

def get_console_policy(client):
    """Return the active (console-visible) policy for the first classic ELB."""
    elbs = client.describe_load_balancers()
    for elb in elbs['LoadBalancerDescriptions']:
        # Get the named policy to pass to get the active policy. -1 denotes the last in the list.
        policy_name = jmespath.search('ListenerDescriptions[].PolicyNames[] | [-1]', elb)
        policy_description = client.describe_load_balancer_policies(
            LoadBalancerName=elb['LoadBalancerName'], PolicyNames=[policy_name])
        console_policy = jmespath.search(
            'PolicyDescriptions[?PolicyName==`{0}`] | [0].PolicyAttributeDescriptions[0].AttributeValue'.format(policy_name),
            policy_description)
        return console_policy
I'm trying to use AssumeRole in such a way that I'm traversing multiple accounts and retrieving assets for those accounts. I've made it to this point:
import boto3
sts_client = boto3.client('sts')
assumedRoleObject = sts_client.assume_role(
RoleArn="arn:aws:iam::account-of-role-to-assume:role/name-of-role",
RoleSessionName="AssumeRoleSession1")
Great, I have the assumedRoleObject. But now I want to use that to list things like ELBs or something that isn't a built-in low-level resource.
How does one go about doing that? If I may ask, please code out a full example, so that everyone can benefit.
Here's a code snippet from the official AWS documentation where an s3 resource is created for listing all s3 buckets. boto3 resources or clients for other services can be built in a similar fashion.
# create an STS client object that represents a live connection to the
# STS service
sts_client = boto3.client('sts')
# Call the assume_role method of the STSConnection object and pass the role
# ARN and a role session name.
assumed_role_object=sts_client.assume_role(
RoleArn="arn:aws:iam::account-of-role-to-assume:role/name-of-role",
RoleSessionName="AssumeRoleSession1"
)
# From the response that contains the assumed role, get the temporary
# credentials that can be used to make subsequent API calls
credentials=assumed_role_object['Credentials']
# Use the temporary credentials that AssumeRole returns to make a
# connection to Amazon S3
s3_resource=boto3.resource(
's3',
aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken'],
)
# Use the Amazon S3 resource object that is now configured with the
# credentials to access your S3 buckets.
for bucket in s3_resource.buckets.all():
print(bucket.name)
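Since the question mentions ELBs specifically, the same temporary credentials can be fed to a classic ELB client instead of the S3 resource; a minimal sketch (the region is an assumed placeholder):
# Classic ELB client built from the same temporary credentials
elb_client = boto3.client(
    'elb',
    region_name='us-east-1',
    aws_access_key_id=credentials['AccessKeyId'],
    aws_secret_access_key=credentials['SecretAccessKey'],
    aws_session_token=credentials['SessionToken'],
)
for lb in elb_client.describe_load_balancers()['LoadBalancerDescriptions']:
    print(lb['LoadBalancerName'])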
To get a session with an assumed role:
import botocore
import boto3
import datetime
from dateutil.tz import tzlocal
assume_role_cache: dict = {}
def assumed_role_session(role_arn: str, base_session: botocore.session.Session = None):
base_session = base_session or boto3.session.Session()._session
fetcher = botocore.credentials.AssumeRoleCredentialFetcher(
client_creator = base_session.create_client,
source_credentials = base_session.get_credentials(),
role_arn = role_arn,
extra_args = {
# 'RoleSessionName': None # set this if you want something non-default
}
)
creds = botocore.credentials.DeferredRefreshableCredentials(
method = 'assume-role',
refresh_using = fetcher.fetch_credentials,
time_fetcher = lambda: datetime.datetime.now(tzlocal())
)
botocore_session = botocore.session.Session()
botocore_session._credentials = creds
return boto3.Session(botocore_session = botocore_session)
# usage:
session = assumed_role_session('arn:aws:iam::ACCOUNTID:role/ROLE_NAME')
ec2 = session.client('ec2') # ... etc.
The resulting session's credentials will be automatically refreshed when required, which is quite nice.
Note: my previous answer was outright wrong but I can't delete it, so I've replaced it with a better and working answer.
You can assume the role using an STS token, like this:
from boto3.session import Session

class Boto3STSService(object):
def __init__(self, arn):
sess = Session(aws_access_key_id=ARN_ACCESS_KEY,
aws_secret_access_key=ARN_SECRET_KEY)
sts_connection = sess.client('sts')
assume_role_object = sts_connection.assume_role(
RoleArn=arn, RoleSessionName=ARN_ROLE_SESSION_NAME,
DurationSeconds=3600)
self.credentials = assume_role_object['Credentials']
This will give you a temporary access key and secret key, with a session token. With these temporary credentials, you can access any service. For example, if you want to access ELB, you can use the below code:
self.tmp_credentials = Boto3STSService(arn).credentials
def get_boto3_session(self):
tmp_access_key = self.tmp_credentials['AccessKeyId']
tmp_secret_key = self.tmp_credentials['SecretAccessKey']
security_token = self.tmp_credentials['SessionToken']
boto3_session = Session(
aws_access_key_id=tmp_access_key,
aws_secret_access_key=tmp_secret_key, aws_session_token=security_token
)
return boto3_session
def get_elb_boto3_connection(self, region):
sess = self.get_boto3_session()
elb_conn = sess.client(service_name='elb', region_name=region)
return elb_conn
With reference to the solution by @jarrad, which is not working as of Feb 2021: as a solution that does not use STS explicitly, please see the following.
import boto3
import botocore.session
from botocore.credentials import AssumeRoleCredentialFetcher, DeferredRefreshableCredentials
def get_boto3_session(assume_role_arn=None):
session = boto3.Session(aws_access_key_id="abc", aws_secret_access_key="def")
if not assume_role_arn:
return session
fetcher = AssumeRoleCredentialFetcher(
client_creator=_get_client_creator(session),
source_credentials=session.get_credentials(),
role_arn=assume_role_arn,
)
botocore_session = botocore.session.Session()
botocore_session._credentials = DeferredRefreshableCredentials(
method='assume-role',
refresh_using=fetcher.fetch_credentials
)
return boto3.Session(botocore_session=botocore_session)
def _get_client_creator(session):
def client_creator(service_name, **kwargs):
return session.client(service_name, **kwargs)
return client_creator
The function can be called as follows:
ec2_client = get_boto3_session(assume_role_arn='my_role_arn').client('ec2', region_name='us-east-1')
If you want a functional implementation, this is what I settled on:
import boto3
from typing import Optional

def filter_none_values(kwargs: dict) -> dict:
"""Returns a new dictionary excluding items where value was None"""
return {k: v for k, v in kwargs.items() if v is not None}
def assume_session(
role_session_name: str,
role_arn: str,
duration_seconds: Optional[int] = None,
region_name: Optional[str] = None,
) -> boto3.Session:
"""
Returns a session with the given name and role.
If not specified, duration will be set by AWS, probably at 1 hour.
If not specified, region will be left unset.
Region can be overridden by each client or resource spawned from this session.
"""
assume_role_kwargs = filter_none_values(
{
"RoleSessionName": role_session_name,
"RoleArn": role_arn,
"DurationSeconds": duration_seconds,
}
)
credentials = boto3.client("sts").assume_role(**assume_role_kwargs)["Credentials"]
create_session_kwargs = filter_none_values(
{
"aws_access_key_id": credentials["AccessKeyId"],
"aws_secret_access_key": credentials["SecretAccessKey"],
"aws_session_token": credentials["SessionToken"],
"region_name": region_name,
}
)
return boto3.Session(**create_session_kwargs)
def main() -> None:
session = assume_session(
"MyCustomSessionName",
"arn:aws:iam::XXXXXXXXXXXX:role/TheRoleIWantToAssume",
region_name="us-east-1",
)
client = session.client(service_name="ec2")
print(client.describe_key_pairs())
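To run the example directly, append the usual entry-point guard (assumed; not part of the original snippet):
if __name__ == "__main__":
    main()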
import json
import boto3
roleARN = 'arn:aws:iam::account-of-role-to-assume:role/name-of-role'
client = boto3.client('sts')
response = client.assume_role(RoleArn=roleARN,
RoleSessionName='RoleSessionName',
DurationSeconds=900)
dynamodb_client = boto3.client('dynamodb', region_name='us-east-1',
aws_access_key_id=response['Credentials']['AccessKeyId'],
aws_secret_access_key=response['Credentials']['SecretAccessKey'],
aws_session_token = response['Credentials']['SessionToken'])
response = dynamodb_client.get_item(
Key={
'key1': {
'S': '1',
},
'key2': {
'S': '2',
},
},
TableName='TestTable')
print(response)
#!/usr/bin/env python3
import boto3
sts_client = boto3.client('sts')
assumed_role = sts_client.assume_role(RoleArn = "arn:aws:iam::123456789012:role/example_role",
RoleSessionName = "AssumeRoleSession1",
DurationSeconds = 1800)
session = boto3.Session(
aws_access_key_id = assumed_role['Credentials']['AccessKeyId'],
aws_secret_access_key = assumed_role['Credentials']['SecretAccessKey'],
aws_session_token = assumed_role['Credentials']['SessionToken'],
region_name = 'us-west-1'
)
# now we make use of the role to retrieve a parameter from SSM
client = session.client('ssm')
response = client.get_parameter(
Name = '/this/is/a/path/parameter',
WithDecryption = True
)
print(response)
Assuming that (1) the ~/.aws/config or ~/.aws/credentials file is populated with each of the roles that you wish to assume, and that (2) the default role has AssumeRole defined in its IAM policy for each of those roles, then you can simply do the following (in pseudo-code) and not have to fuss with STS:
import boto3
# get all of the profiles (roles) from the AWS config/credentials file
profiles = boto3.session.Session().available_profiles
for profile in profiles:
    # this session is only used to fetch the available regions
    initial_session = boto3.Session(profile_name=profile)
    # get the regions
    regions = initial_session.get_available_regions('ec2')
# cycle through the regions, setting up session, resource and client objects
for region in regions:
boto3_session = boto3.Session(profile_name=profile, region_name=region)
boto3_resource = boto3_session.resource(service_name='s3', region_name=region)
boto3_client = boto3_session.client(service_name='s3', region_name=region)
[ do something interesting with your session/resource/client here ]
Credential Setup (boto3 - Shared Credentials File)
Assume Role Setup (AWS)
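For reference, a minimal sketch of the ~/.aws/config shape this approach assumes (keys, account ID, and names are placeholders):
[default]
aws_access_key_id = AKIA...
aws_secret_access_key = ...

[profile account-a]
role_arn = arn:aws:iam::111111111111:role/name-of-role
source_profile = default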
After a few days of searching, this is the simplest solution I have found. It is explained here, but it does not have a usage example.
import boto3
for profile in boto3.Session().available_profiles:
boto3.DEFAULT_SESSION = boto3.session.Session(profile_name=profile)
s3 = boto3.resource('s3')
for bucket in s3.buckets.all():
print(bucket)
This will switch the default role you will be using. To not make the profile the default, just do not assign it to boto3.DEFAULT_SESSION, but instead do the following:
testing_profile = boto3.session.Session(profile_name='mainTesting')
s3 = testing_profile.resource('s3')
for bucket in s3.buckets.all():
print(bucket)
It is important to note that the .aws credentials file needs to be set up in a specific way:
[default]
aws_access_key_id = default_access_id
aws_secret_access_key = default_access_key
[main]
aws_access_key_id = main_profile_access_id
aws_secret_access_key = main_profile_access_key
[mainTesting]
source_profile = main
role_arn = Testing role arn
mfa_serial = mfa_arn_for_main_role
[mainProduction]
source_profile = main
role_arn = Production role arn
mfa_serial = mfa_arn_for_main_role
I don't know why, but the mfa_serial key has to be on the role profiles for this to work, instead of on the source account, which would make more sense.
Here's the code snippet I used:
import boto3
from boto3.session import Session

sts_client = boto3.client('sts')
assumed_role_object = sts_client.assume_role(
RoleArn=<arn of the role to assume>,
RoleSessionName="<role session name>"
)
print(assumed_role_object)
credentials = assumed_role_object['Credentials']
session = Session(
aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken']
)
s3 = session.client('s3')