Skip to content

Instantly share code, notes, and snippets.

@vpnagraj
Last active February 10, 2026 03:17
Show Gist options
  • Select an option

  • Save vpnagraj/fea39d08e46814781a8dabbd7f22ba13 to your computer and use it in GitHub Desktop.

Select an option

Save vpnagraj/fea39d08e46814781a8dabbd7f22ba13 to your computer and use it in GitHub Desktop.
Example of AWS provisioning with boto3. Note that provision_infrastructure.py depends on input from provisioning_config.yaml, which includes some placeholders to be filled in as needed.
import boto3
import logging
import yaml
import json
import time
import argparse
## Configure logging: logged messages are printed to the console
## (StreamHandler) AND appended to a local provision.log file (FileHandler).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler("provision.log", mode="a", encoding="utf-8")
    ]
)
## Module-level logger used by every provisioning helper below.
logger = logging.getLogger(__name__)
## helper to read the provisioning settings from disk
def load_config(path="provisioning_config.yaml"):
    """
    Load provisioning settings from a YAML file.

    Args:
        path (str): Path to the YAML configuration file.

    Returns:
        The parsed YAML content (typically a dict).
    """
    with open(path, mode="r", encoding="utf-8") as config_file:
        raw_text = config_file.read()
    return yaml.safe_load(raw_text)
## Load the shared configuration once at import time.
## NOTE: use load_config's default filename ("provisioning_config.yaml") —
## the previous hard-coded "config.yaml" contradicted both the default and
## the config file shipped with this script.
CONFIG = load_config()
def create_s3_bucket(bucket_name, region):
    """
    Create an S3 bucket in the specified region.

    Args:
        bucket_name (str): Name of the bucket to create
        region (str): AWS region

    Returns:
        str: Bucket name if successful, None if the bucket already exists
        in this account.

    Raises:
        RuntimeError: If the bucket cannot be created for any other reason,
        so processing in main() doesn't expect a bucket that isn't there.
    """
    s3 = boto3.client('s3', region_name=region)
    ## create the bucket and handle location constraint for region if needed:
    ## us-east-1 is the default and must NOT be passed as a LocationConstraint
    try:
        if region == 'us-east-1':
            s3.create_bucket(Bucket=bucket_name)
        else:
            s3.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={
                    'LocationConstraint': region
                }
            )
        logger.info(f"Successfully created bucket: {bucket_name}")
        return bucket_name
    except Exception as e:
        ## non-ClientError exceptions (e.g. connection failures) have no
        ## .response attribute; fall back to an empty error code so the
        ## original code's AttributeError can no longer mask the real error
        error_code = getattr(e, "response", {}).get("Error", {}).get("Code", "")
        ## for exception handling conditionally switch on the error code,
        ## capturing informative logging where we can
        ## bucket already exists under the current account
        ## NOTE: this will only be raised if the region isn't us-east-1
        if error_code == "BucketAlreadyOwnedByYou":
            logger.info(f"Failed to create bucket: {bucket_name}. Bucket appears to already exist in this account. The full error states: {e}")
            return None
        ## bucket already exists globally (in any account)
        elif error_code == "BucketAlreadyExists":
            logger.error(f"Failed to create bucket: {bucket_name}. Bucket appears to already exist globally. The full error states: {e}")
            raise RuntimeError(f"{e}")
        ## bucket can't be created in the specified region
        ## NOTE: this shouldn't happen given the handling of location constraint for non us-east-1 regions ... but just in case
        elif error_code == "IllegalLocationConstraintException":
            logger.error(f"Failed to create bucket: {bucket_name}. Bucket may not be created in the specified region. The full error states: {e}")
            raise RuntimeError(f"{e}")
        ## anything else ...
        else:
            logger.error(f"Failed to create bucket: {bucket_name}. The full error states: {e}")
            raise RuntimeError(f"{e}")
def create_iam_role(role_name):
    """
    Create an IAM role that EC2 instances are allowed to assume.

    Args:
        role_name (str): Name of the IAM role

    Returns:
        str: Role ARN if successful, None otherwise
    """
    iam = boto3.client('iam')
    ## trust policy allowing EC2 to assume this role
    trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {"Service": "ec2.amazonaws.com"},
                "Action": "sts:AssumeRole",
            }
        ],
    }
    try:
        created = iam.create_role(
            RoleName=role_name,
            AssumeRolePolicyDocument=json.dumps(trust_policy),
            Description='Role for EC2 to access S3',
        )
        logger.info(f"Created IAM role: {role_name}")
        return created['Role']['Arn']
    except iam.exceptions.EntityAlreadyExistsException:
        ## the role is already present: look up and reuse its ARN
        logger.warning(f"Role {role_name} already exists")
        return iam.get_role(RoleName=role_name)['Role']['Arn']
    except Exception as e:
        logger.error(f"Error creating IAM role: {e}")
        return None
def attach_s3_policy(role_name, bucket_name):
    """
    Attach an inline policy to the role granting S3 access.

    Args:
        role_name (str): Name of the IAM role
        bucket_name (str): Name of the S3 bucket to grant access to

    Returns:
        bool: True if successful, False otherwise
    """
    iam = boto3.client('iam')
    bucket_arn = "arn:aws:s3:::" + bucket_name
    ## grant read/write/list on the target bucket, plus the ability to
    ## enumerate all buckets in the account
    policy_document = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": ["s3:GetObject", "s3:PutObject", "s3:ListBucket"],
                "Resource": [bucket_arn, bucket_arn + "/*"],
            },
            {
                "Effect": "Allow",
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": ["*"],
            },
        ],
    }
    try:
        ## NOTE: the policy document must be serialized to a JSON string
        ## before IAM will accept it
        iam.put_role_policy(
            RoleName=role_name,
            PolicyName='S3AccessPolicy',
            PolicyDocument=json.dumps(policy_document),
        )
        logger.info(f"Attached S3 policy to role {role_name}")
        return True
    except Exception as e:
        logger.error(f"Error attaching policy: {e}")
        return False
def create_instance_profile(profile_name, role_name):
    """
    Create an instance profile and add the IAM role to it.

    Idempotent: if the profile already exists, its ARN is returned, and a
    failure to re-attach the role (e.g. because it is already attached —
    an instance profile holds at most one role) is logged rather than fatal.

    Args:
        profile_name (str): Name of the instance profile
        role_name (str): Name of the IAM role to add

    Returns:
        str: Instance profile ARN if successful, None otherwise
    """
    iam = boto3.client('iam')
    try:
        ## create the instance profile
        iam.create_instance_profile(InstanceProfileName=profile_name)
        ## add the role to the freshly created profile
        iam.add_role_to_instance_profile(InstanceProfileName=profile_name, RoleName=role_name)
        logger.info(f"Created instance profile {profile_name} and attached it to role {role_name}")
        return iam.get_instance_profile(InstanceProfileName=profile_name)['InstanceProfile']['Arn']
    except iam.exceptions.EntityAlreadyExistsException:
        ## profile already exists; try to attach the role, but tolerate
        ## failure when the role is already attached (the original code
        ## crashed here on a re-run because profiles hold only one role)
        try:
            iam.add_role_to_instance_profile(InstanceProfileName=profile_name, RoleName=role_name)
            logger.info(f"Instance profile {profile_name} already exists; attached it to role {role_name}")
        except Exception as attach_err:
            logger.warning(f"Instance profile {profile_name} already exists; could not attach role {role_name}: {attach_err}")
        return iam.get_instance_profile(InstanceProfileName=profile_name)['InstanceProfile']['Arn']
    except Exception as e:
        logger.error(f"Error creating instance profile: {e}")
        return None
def allocate_elastic_ip():
    """
    Allocate an Elastic IP address in the configured region.

    Returns:
        dict: Dictionary with 'AllocationId' and 'PublicIp', or None
    """
    ec2 = boto3.client('ec2', region_name=CONFIG['region'])
    try:
        ## request a VPC-scoped address; keep the response so we can log
        ## the public IP we were handed
        address = ec2.allocate_address(Domain='vpc')
        logger.info(f"Allocated Elastic IP at {address['PublicIp']}")
        ## return just the two fields callers need
        return {
            "AllocationId": address['AllocationId'],
            "PublicIp": address['PublicIp'],
        }
    except Exception as e:
        logger.error(f"Error allocating Elastic IP: {e}")
        return None
def associate_elastic_ip(instance_id, allocation_id):
    """
    Associate an Elastic IP with an EC2 instance.

    Args:
        instance_id (str): ID of the EC2 instance
        allocation_id (str): Allocation ID of the Elastic IP

    Returns:
        str: Association ID if successful, None otherwise
    """
    ec2 = boto3.client('ec2', region_name=CONFIG['region'])
    try:
        ## bind the elastic IP to the instance and report the association id
        result = ec2.associate_address(AllocationId=allocation_id, InstanceId=instance_id)
        association_id = result['AssociationId']
        logger.info(f"Association {association_id} successfully created.")
        return association_id
    except Exception as e:
        logger.error(f"Error associating Elastic IP: {e}")
        return None
def launch_ec2_instance(instance_type, ami_id, key_name, security_group, instance_profile_name, bucket_name):
    """
    Launch an EC2 instance with user data and instance profile.

    Args:
        instance_type (str): EC2 instance type
        ami_id (str): AMI ID to use
        key_name (str): SSH key pair name
        security_group (str): Security Group ID
        instance_profile_name (str): Instance profile name
        bucket_name (str): S3 bucket name

    Returns:
        str: Instance ID if successful, None otherwise
    """
    ec2 = boto3.client('ec2', region_name=CONFIG['region'])
    # User data script - bootstraps the instance
    user_data_script = f"""#!/bin/bash
apt update
apt upgrade -y
snap install docker
sleep 10
docker run -d --restart=always -p 8888:8888 quay.io/jupyter/base-notebook start-notebook.py --NotebookApp.token='my-token'
# Test S3 access by copying a file
aws s3 cp /var/log/apt/history.log s3://{bucket_name}/
"""
    try:
        launch_response = ec2.run_instances(
            ImageId=ami_id,
            InstanceType=instance_type,
            KeyName=key_name,
            MinCount=1,
            MaxCount=1,
            UserData=user_data_script,
            SecurityGroupIds=[security_group],
            IamInstanceProfile={'Name': instance_profile_name},
            TagSpecifications=[
                {
                    'ResourceType': 'instance',
                    'Tags': [
                        {'Key': 'Name', 'Value': 'boto3-lab-instance'},
                        {'Key': 'Lab', 'Value': 'IaC-Python'},
                    ],
                }
            ],
        )
        new_instance_id = launch_response['Instances'][0]['InstanceId']
        logger.info(f"Launched instance: {new_instance_id}")
        ## block until AWS reports the instance as running
        ec2.get_waiter('instance_running').wait(InstanceIds=[new_instance_id])
        return new_instance_id
    except Exception as e:
        logger.error(f"Error launching instance: {e}")
        return None
def cleanup(instance_id, allocation_id):
    """
    Best-effort teardown of everything main() provisions.

    Each resource is deleted independently so one failure no longer blocks
    the remaining deletions (the original single try/except stopped at the
    first error). Also waits for instance termination before releasing the
    Elastic IP (release fails while the address is still associated) and
    empties the bucket before deleting it (S3 refuses to delete non-empty
    buckets, and the instance user data uploads a log file).

    Args:
        instance_id (str): ID of the EC2 instance
        allocation_id (str): Allocation ID of the Elastic IP

    Returns:
        None
    """
    ec2 = boto3.client('ec2', region_name=CONFIG['region'])
    s3 = boto3.client('s3', region_name=CONFIG['region'])
    iam = boto3.client('iam')
    profile_name = CONFIG['instance']['instance_profile_name']
    role_name = CONFIG['instance']['role_name']
    bucket_name = CONFIG['bucket_name']

    def _attempt(description, fn):
        ## run one deletion step, logging (not raising) on failure
        try:
            fn()
        except Exception as e:
            logger.error(f"Error deleting resources ({description}): {e}")

    _attempt("terminate instance", lambda: ec2.terminate_instances(InstanceIds=[instance_id]))
    ## the elastic IP cannot be released while still associated with a
    ## live instance, so wait for termination to complete first
    _attempt("wait for termination", lambda: ec2.get_waiter('instance_terminated').wait(InstanceIds=[instance_id]))
    _attempt("release elastic IP", lambda: ec2.release_address(AllocationId=allocation_id))
    _attempt("detach role from profile", lambda: iam.remove_role_from_instance_profile(InstanceProfileName=profile_name, RoleName=role_name))
    _attempt("delete instance profile", lambda: iam.delete_instance_profile(InstanceProfileName=profile_name))
    _attempt("delete role policy", lambda: iam.delete_role_policy(RoleName=role_name, PolicyName='S3AccessPolicy'))
    _attempt("delete role", lambda: iam.delete_role(RoleName=role_name))

    def _empty_and_delete_bucket():
        ## remove every object first, then delete the bucket itself
        paginator = s3.get_paginator('list_objects_v2')
        for page in paginator.paginate(Bucket=bucket_name):
            for obj in page.get('Contents', []):
                s3.delete_object(Bucket=bucket_name, Key=obj['Key'])
        s3.delete_bucket(Bucket=bucket_name)

    _attempt("delete bucket", _empty_and_delete_bucket)
    return None
def main():
    """
    Provision the full stack: S3 bucket, IAM role/policy/instance profile,
    EC2 instance, and Elastic IP; optionally tear it all down afterwards
    when run with --cleanup.

    Returns:
        dict: 'instance_id', 'public_ip', and 'bucket_name' of the
        provisioned stack.

    Raises:
        RuntimeError: If the EC2 instance or Elastic IP cannot be
        provisioned (the helpers return None on failure; the original code
        passed those Nones downstream and died with a TypeError).
    """
    ## set up argument parsing
    parser = argparse.ArgumentParser()
    ## add arg parse for cleanup mode
    parser.add_argument(
        "--cleanup",
        action="store_true",
        help="Trigger the cleanup"
    )
    args = parser.parse_args()
    logger.info("Starting infrastructure provisioning...")
    bucket_name = CONFIG['bucket_name']
    region = CONFIG['region']
    instance_details = CONFIG['instance']
    ## step 1: create S3 bucket
    create_s3_bucket(
        bucket_name=bucket_name,
        region=region
    )
    ## step 2: create IAM role
    create_iam_role(role_name=instance_details['role_name'])
    ## step 3: attach S3 policy to role
    attach_s3_policy(
        role_name=instance_details['role_name'],
        bucket_name=bucket_name
    )
    ## step 4: create instance profile
    create_instance_profile(
        profile_name=instance_details['instance_profile_name'],
        role_name=instance_details['role_name']
    )
    ## NOTE: IAM resources need time to propagate
    logger.info("Waiting 10 seconds for IAM resources to propagate...")
    time.sleep(10)
    ## step 5: launch EC2 instance
    instance_id = launch_ec2_instance(
        instance_type=instance_details['instance_type'],
        ami_id=instance_details['ami_id'],
        key_name=instance_details['key_name'],
        security_group=instance_details['security_group_id'],
        instance_profile_name=instance_details['instance_profile_name'],
        bucket_name=bucket_name
    )
    ## bail out instead of passing None to the association step
    if instance_id is None:
        raise RuntimeError("EC2 instance launch failed; aborting provisioning.")
    ## step 6: allocate Elastic IP
    eip_info = allocate_elastic_ip()
    if eip_info is None:
        raise RuntimeError("Elastic IP allocation failed; aborting provisioning.")
    ## step 7: associate Elastic IP with instance
    associate_elastic_ip(
        instance_id=instance_id,
        allocation_id=eip_info['AllocationId']
    )
    logger.info("Infrastructure provisioning complete!")
    logger.info(f"Your instance is accessible at: {eip_info['PublicIp']}")
    logger.info(f"S3 bucket created: {bucket_name}")
    ## handle case when we want to clean up
    if args.cleanup:
        logger.info("Cleanup triggered. Sleeping for 30 seconds before deprovisioning everything...")
        time.sleep(30)
        cleanup(
            instance_id=instance_id,
            allocation_id=eip_info['AllocationId']
        )
        logger.info("Resources deleted.")
    return {
        'instance_id': instance_id,
        'public_ip': eip_info['PublicIp'],
        'bucket_name': bucket_name
    }
## run the provisioner only when executed as a script (not on import)
if __name__ == "__main__":
    main()
## provisioning_config.yaml — input for the boto3 provisioning script above.
## Replace each {...} placeholder with a value for your AWS account.
region: us-east-1
bucket_name: {BUCKET-NAME-HERE}  # S3 bucket names must be globally unique
instance:
  ami_id: ami-0b6c6ebed2801a5cb # ubuntu 24.04lts
  instance_type: t2.medium
  key_name: {KEY-NAME-HERE}  # name of an existing EC2 key pair
  security_group_id: {SECURITY-GROUP-HERE}  # must allow inbound 8888 for the notebook
  role_name: EC2-S3-Instance-Role
  instance_profile_name: EC2-S3-Instance-Profile
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment