Fixed first version of redirect and S3 policies

This commit is contained in:
2024-04-23 20:17:51 +02:00
parent fc03ef8ecd
commit 2cdc051191
2 changed files with 80 additions and 21 deletions

View File

@@ -1,21 +1,30 @@
import os
import boto3 import boto3
import json import json
import boto3.exceptions import boto3.exceptions
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
s3_client = None s3_client = None
bucket_config = ''
bucket_data = ''
def lambda_handler(event: dict, context): def lambda_handler(event: dict, context):
global s3_client global s3_client, bucket_config, bucket_data
if s3_client is None: if s3_client is None:
print("Init Function")
bucket_config = os.environ.get('BUCKET_CONFIG', 'standout-config')
bucket_data = os.environ.get('BUCKET_DATA', 'standout-data')
print(f'Bucket Config: {bucket_config}')
print(f' Bucket Data: {bucket_data}')
s3_client = boto3.client('s3') s3_client = boto3.client('s3')
for x in s3_client.list_buckets()['Buckets']: for x in s3_client.list_buckets()['Buckets']:
print(f"{x['Name']}: {x['CreationDate'].isoformat()}") print(f"{x['Name']}: {x['CreationDate'].isoformat()}")
try: try:
resp = s3_client.get_object( resp = s3_client.get_object(
Bucket='standout-data', Bucket=bucket_config,
Key='redirects.json' Key='redirects.json'
) )
except s3_client.exceptions.NoSuchKey as e: except s3_client.exceptions.NoSuchKey as e:
@@ -27,12 +36,12 @@ def lambda_handler(event: dict, context):
try: try:
redirects = json.load(resp["Body"]) redirects = json.load(resp["Body"])
params = event.get('queryStringParameters', {}) params = event.get('queryStringParameters', {})
customer = redirects.get(params['cust_id'], {}) customer = redirects.get(params['id'], {})
gadget = customer.get(params['gadget_id'], {}) tag = customer.get(params['tag_id'], {})
content = gadget.get('content', None) content = tag.get('content', None)
dest = None dest = None
if content and isinstance(content, dict): if content and isinstance(content, dict) and not "type" in content.keys():
dest = content[params['face_id']] dest = content[params['face_id']]
else: else:
dest = content dest = content
@@ -41,9 +50,10 @@ def lambda_handler(event: dict, context):
match dest.get('type', 's3'): match dest.get('type', 's3'):
case "s3": case "s3":
try: try:
key = f'{params['id']}/{params['tag_id']}/{dest['key']}'
response = s3_client.generate_presigned_url('get_object', response = s3_client.generate_presigned_url('get_object',
Params={'Bucket': 'standout-data', Params={'Bucket': bucket_data,
'Key': dest['key']}, 'Key': key},
ExpiresIn=120) ExpiresIn=120)
except ClientError as e: except ClientError as e:
print(e) print(e)
@@ -77,9 +87,9 @@ if __name__ == "__main__":
"path": "/", "path": "/",
"httpMethod": "GET", "httpMethod": "GET",
"queryStringParameters": { "queryStringParameters": {
"cust_id": "cust_id1", "id": "customer1",
"gadget_id": "gadget_id1", "tag_id": "tag3",
"face_id": "face_id3" "face_id": "face1"
}, },
} }

View File

@@ -17,6 +17,12 @@ resource "aws_vpc" "vpc_standout" {
cidr_block = "10.0.0.0/16" cidr_block = "10.0.0.0/16"
} }
# create an s3 bucket for config
resource "aws_s3_bucket" "s3_standout_config" {
bucket = "standout-config"
force_destroy = false
}
# create an s3 bucket for data # create an s3 bucket for data
resource "aws_s3_bucket" "s3_standout" { resource "aws_s3_bucket" "s3_standout" {
bucket = "standout-data" bucket = "standout-data"
@@ -30,11 +36,27 @@ resource "aws_s3_bucket_ownership_controls" "s3_standout_ownership" {
} }
} }
resource "aws_s3_bucket_ownership_controls" "s3_standout_config_ownership" {
bucket = aws_s3_bucket.s3_standout_config.id
rule {
object_ownership = "BucketOwnerPreferred"
}
}
resource "aws_s3_bucket_public_access_block" "s3_standout_public_access" { resource "aws_s3_bucket_public_access_block" "s3_standout_public_access" {
bucket = aws_s3_bucket.s3_standout.id bucket = aws_s3_bucket.s3_standout.id
block_public_acls = true block_public_acls = false
block_public_policy = true block_public_policy = false
ignore_public_acls = true
restrict_public_buckets = true
}
resource "aws_s3_bucket_public_access_block" "s3_standout_config_public_access" {
bucket = aws_s3_bucket.s3_standout_config.id
block_public_acls = false
block_public_policy = false
ignore_public_acls = true ignore_public_acls = true
restrict_public_buckets = true restrict_public_buckets = true
} }
@@ -54,6 +76,30 @@ resource "aws_s3_bucket_policy" "s3_standout_policy" {
policy = data.aws_iam_policy_document.s3_standout_allow_lambda.json policy = data.aws_iam_policy_document.s3_standout_allow_lambda.json
} }
resource "aws_s3_bucket_policy" "s3_standout_config_policy" {
bucket = aws_s3_bucket.s3_standout_config.id
policy = data.aws_iam_policy_document.s3_standout_config_allow_lambda.json
}
data "aws_iam_policy_document" "s3_standout_config_allow_lambda" {
statement {
principals {
type = "AWS"
identifiers = ["*"]
}
actions = [
"s3:Get*",
"s3:List*",
"s3:Put*",
]
resources = [
"${aws_s3_bucket.s3_standout_config.arn}/*",
]
}
}
data "aws_iam_policy_document" "s3_standout_allow_lambda" { data "aws_iam_policy_document" "s3_standout_allow_lambda" {
statement { statement {
principals { principals {
@@ -64,7 +110,7 @@ data "aws_iam_policy_document" "s3_standout_allow_lambda" {
actions = [ actions = [
"s3:Get*", "s3:Get*",
"s3:List*", "s3:List*",
"s3:Put*" "s3:Put*",
] ]
resources = [ resources = [
@@ -108,13 +154,16 @@ resource "aws_lambda_function" "lambda_standout_redirect" {
source_code_hash = data.archive_file.lambda_standout_code.output_base64sha256 source_code_hash = data.archive_file.lambda_standout_code.output_base64sha256
runtime = "python3.10" runtime = "python3.12"
#environment { timeout = 10
# variables = {
# foo = "bar" environment {
# } variables = {
#} BUCKET_CONFIG = aws_s3_bucket.s3_standout_config.bucket,
BUCKET_DATA = aws_s3_bucket.s3_standout.bucket
}
}
} }
# create API gateway for lambda trigger and connect # create API gateway for lambda trigger and connect