only handle .zip files #7

Status: Open · wants to merge 16 commits into master
Binary file removed ECSLogo.zip
Binary file not shown.
14 changes: 8 additions & 6 deletions config.py
@@ -1,15 +1,17 @@
 # Constants (User configurable)

-FULL_NAME_AND_EMAIL = 'First Last <email@domain.com>' # For Dockerfile/POV-Ray builds.
-APP_NAME = 'ECSPOVRayWorker' # Used to generate derivative names unique to the application.
+FULL_NAME_AND_EMAIL = 'Peter Lepeska <peter.lepeska@viasat.com>' # For Dockerfile/POV-Ray builds.
+APP_NAME = 'ecs-test-plepeska' # Used to generate derivative names unique to the application.

-DOCKERHUB_USER = 'username'
+# create a repo here https://console.aws.amazon.com/ecs/home?region=us-east-1#/repositories
+# DOCKERHUB_USER will be created as part of that
+DOCKERHUB_USER = '831754492748.dkr.ecr.us-east-1.amazonaws.com'
 DOCKERHUB_EMAIL = 'email@domain.com'
-DOCKERHUB_REPO = 'private'
+DOCKERHUB_REPO = 'ecs-test'
 DOCKERHUB_TAG = DOCKERHUB_USER + '/' + DOCKERHUB_REPO + ':' + APP_NAME

 AWS_REGION = 'us-east-1'
 AWS_PROFILE = 'default' # The same profile used by your AWS CLI installation

-SSH_KEY_NAME = 'your-ssh-key.pem' # Expected to be in ~/.ssh
-ECS_CLUSTER = 'default'
+SSH_KEY_NAME = 'automation.pem' # Expected to be in ~/.ssh
+ECS_CLUSTER = 'peter'
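
Reviewer note: DOCKERHUB_USER now points at an ECR registry rather than a Docker Hub account, so the composed DOCKERHUB_TAG becomes a fully qualified ECR image reference. A quick sanity check of the concatenation, using the values from this diff:

    # Mirrors the DOCKERHUB_TAG concatenation in config.py.
    DOCKERHUB_USER = '831754492748.dkr.ecr.us-east-1.amazonaws.com'
    DOCKERHUB_REPO = 'ecs-test'
    APP_NAME = 'ecs-test-plepeska'

    DOCKERHUB_TAG = DOCKERHUB_USER + '/' + DOCKERHUB_REPO + ':' + APP_NAME
    print(DOCKERHUB_TAG)
    # -> 831754492748.dkr.ecr.us-east-1.amazonaws.com/ecs-test:ecs-test-plepeska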
72 changes: 36 additions & 36 deletions ecs-worker-launcher/ecs-worker-launcher.js
@@ -50,41 +50,41 @@ exports.handler = function(event, context) {

     if(!exports.checkS3SuffixWhitelist(key, config.s3_key_suffix_whitelist)) {
         context.fail('Suffix for key: ' + key + ' is not in the whitelist')
-    }
-
-    // We can now go on. Put the Amazon S3 URL into Amazon SQS and start an Amazon ECS task to process it.
-    async.waterfall([
-        function (next) {
-            var params = {
-                MessageBody: JSON.stringify(event),
-                QueueUrl: config.queue
-            };
-            sqs.sendMessage(params, function (err, data) {
-                if (err) { console.warn('Error while sending message: ' + err); }
-                else { console.info('Message sent, ID: ' + data.MessageId); }
-                next(err);
-            });
-        },
-        function (next) {
-            // Starts an ECS task to work through the feeds.
-            var params = {
-                taskDefinition: config.task,
-                count: 1,
-                cluster: 'default'
-            };
-            ecs.runTask(params, function (err, data) {
-                if (err) { console.warn('error: ', "Error while starting task: " + err); }
-                else { console.info('Task ' + config.task + ' started: ' + JSON.stringify(data.tasks))}
-                next(err);
-            });
-        }
-    ], function (err) {
-        if (err) {
-            context.fail('An error has occurred: ' + err);
-        }
-        else {
-            context.succeed('Successfully processed Amazon S3 URL.');
-        }
-    }
-    );
+    } else {
+        // We can now go on. Put the Amazon S3 URL into Amazon SQS and start an Amazon ECS task to process it.
+        async.waterfall([
+            function (next) {
+                var params = {
+                    MessageBody: JSON.stringify(event),
+                    QueueUrl: config.queue
+                };
+                sqs.sendMessage(params, function (err, data) {
+                    if (err) { console.warn('Error while sending message: ' + err); }
+                    else { console.info('Message sent, ID: ' + data.MessageId); }
+                    next(err);
+                });
+            },
+            function (next) {
+                // Starts an ECS task to work through the feeds.
+                var params = {
+                    taskDefinition: config.task,
+                    count: 1,
+                    cluster: config.cluster
+                };
+                ecs.runTask(params, function (err, data) {
+                    if (err) { console.warn('error: ', "Error while starting task: " + err); }
+                    else { console.info('Task ' + config.task + ' started: ' + JSON.stringify(data.tasks))}
+                    next(err);
+                });
+            }
+        ], function (err) {
+            if (err) {
+                context.fail('An error has occurred: ' + err);
+            }
+            else {
+                context.succeed('Successfully processed Amazon S3 URL.');
+            }
+        }
+        );
+    }
 };
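
Reviewer note: this restructure is more than cosmetic. context.fail() does not return from the handler, so the old version fell through and still queued the message and started a task even when the suffix check failed; moving the work into the else branch fixes that, and the cluster name now comes from config instead of being hardcoded to 'default'. A minimal sketch of the same guard-then-dispatch flow in Python with boto3, assuming the config keys that fabfile.py writes (handle() itself is hypothetical):

    # Hypothetical boto3 equivalent of the reworked handler flow.
    import json
    import boto3

    sqs = boto3.client('sqs')
    ecs = boto3.client('ecs')

    def handle(event, config):
        key = event['Records'][0]['s3']['object']['key']
        if not any(key.endswith(s) for s in config['s3_key_suffix_whitelist']):
            raise ValueError('Suffix for key: ' + key + ' is not in the whitelist')
        # Only reached for whitelisted keys, mirroring the new else branch.
        sqs.send_message(QueueUrl=config['queue'], MessageBody=json.dumps(event))
        ecs.run_task(taskDefinition=config['task'], count=1,
                     cluster=config['cluster'])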
55 changes: 19 additions & 36 deletions ecs-worker/ecs-worker.sh
@@ -15,7 +15,7 @@
 #
 # Simple POV-Ray worker shell script.
 #
-# Uses the AWS CLI utility to fetch a message from SQS, fetch a ZIP file from S3 that was specified in the message,
+# Uses the AWS CLI utility to fetch a message from SQS, fetch a BEER file from S3 that was specified in the message,
 # render its contents with POV-Ray, then upload the resulting .png file to the same S3 bucket.
 #

@@ -25,7 +25,7 @@ queue=${SQS_QUEUE_URL}
 # Fetch messages and render them until the queue is drained.
 while [ /bin/true ]; do
     # Fetch the next message and extract the S3 URL to fetch the POV-Ray source ZIP from.
-    echo "Fetching messages from SQS queue: ${queue}..."
+    echo "Fetching messages from SQS queue: ${queue}..." >> ecs-test.log
     result=$( \
         aws sqs receive-message \
             --queue-url ${queue} \
@@ -36,19 +36,18 @@ while [ /bin/true ]; do
     )

     if [ -z "${result}" ]; then
-        echo "No messages left in queue. Exiting."
-        exit 0
+        sleep 1 # sleep for one second
     else
-        echo "Message: ${result}."
+        echo "Message: ${result}." >> ecs-test.log

         receipt_handle=$(echo ${result} | sed -e 's/^.*"\([^"]*\)"\s*\]$/\1/')
-        echo "Receipt handle: ${receipt_handle}."
+        echo "Receipt handle: ${receipt_handle}." >> ecs-test.log

         bucket=$(echo ${result} | sed -e 's/^.*arn:aws:s3:::\([^\\]*\)\\".*$/\1/')
-        echo "Bucket: ${bucket}."
+        echo "Bucket: ${bucket}." >> ecs-test.log

         key=$(echo ${result} | sed -e 's/^.*\\"key\\":\s*\\"\([^\\]*\)\\".*$/\1/')
-        echo "Key: ${key}."
+        echo "Key: ${key}." >> ecs-test.log

         base=${key%.*}
         ext=${key##*.}
@@ -59,45 +58,29 @@
             -n "${key}" -a \
             -n "${base}" -a \
             -n "${ext}" -a \
-            "${ext}" = "zip" \
+            "${ext}" = "beer" \
         ]; then
             mkdir -p work
             pushd work

-            echo "Copying ${key} from S3 bucket ${bucket}..."
+            echo "Copying ${key} from S3 bucket ${bucket}..." >> ../ecs-test.log
             aws s3 cp s3://${bucket}/${key} . --region ${region}

-            echo "Unzipping ${key}..."
-            unzip ${key}
+            echo "Copy log file to S3 bucket." >> ../ecs-test.log
+            aws s3 cp ../ecs-test.log s3://${bucket}/ecs-test.${key}.log

-            if [ -f ${base}.ini ]; then
-                echo "Rendering POV-Ray scene ${base}..."
-                if povray ${base}; then
-                    if [ -f ${base}.png ]; then
-                        echo "Copying result image ${base}.png to s3://${bucket}/${base}.png..."
-                        aws s3 cp ${base}.png s3://${bucket}/${base}.png
-                    else
-                        echo "ERROR: POV-Ray source did not generate ${base}.png image."
-                    fi
-                else
-                    echo "ERROR: POV-Ray source did not render successfully."
-                fi
-            else
-                echo "ERROR: No ${base}.ini file found in POV-Ray source archive."
-            fi
-
-            echo "Cleaning up..."
+            echo "Cleaning up..." >> ../ecs-test.log
             popd
             /bin/rm -rf work
-
-            echo "Deleting message..."
-            aws sqs delete-message \
-                --queue-url ${queue} \
-                --region ${region} \
-                --receipt-handle "${receipt_handle}"
-
         else
-            echo "ERROR: Could not extract S3 bucket and key from SQS message."
+            echo "ERROR: Could not extract S3 bucket and key from SQS message." >> ecs-test.log
         fi
+        echo "Deleting message..." >> ecs-test.log
+        aws sqs delete-message \
+            --queue-url ${queue} \
+            --region ${region} \
+            --receipt-handle "${receipt_handle}"

     fi
done
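
Reviewer note: two behavioral changes stand out here. First, the worker no longer exits when the queue is empty; it sleeps for a second and polls again, so the container runs as a long-lived service rather than a drain-and-exit job. Second, the POV-Ray steps (unzip, render, upload the .png) are gone; the worker now just downloads the object and ships its ecs-test.log back to the bucket, and the message is deleted whether or not the bucket/key extraction succeeded. A rough boto3 sketch of the new loop shape (process_message is hypothetical):

    # Rough sketch of the new poll-forever loop; process_message is hypothetical.
    import time
    import boto3

    sqs = boto3.client('sqs')

    def poll_forever(queue_url):
        while True:
            resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=1)
            messages = resp.get('Messages', [])
            if not messages:
                time.sleep(1)  # queue empty: wait and poll again instead of exiting
                continue
            process_message(messages[0])  # download the .beer key, upload the log
            sqs.delete_message(QueueUrl=queue_url,
                               ReceiptHandle=messages[0]['ReceiptHandle'])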
38 changes: 23 additions & 15 deletions fabfile.py
@@ -39,16 +39,16 @@
 from config import *

 # Constants (Application specific)
-BUCKET_POSTFIX = '-pov-ray-bucket' # Gets put after the unix user ID to create the bucket name.
+BUCKET_POSTFIX = 'pov-ray-bucket' # Gets put after the unix user ID to create the bucket name.
 SSH_KEY_DIR = os.environ['HOME'] + '/.ssh'
 SQS_QUEUE_NAME = APP_NAME + 'Queue'
-LAMBDA_FUNCTION_NAME = 'ecs-worker-launcher'
+LAMBDA_FUNCTION_NAME = APP_NAME + '-ecs-worker-launcher'
+LAMBDA_FUNCTION_SUBDIR = 'ecs-worker-launcher'
 LAMBDA_FUNCTION_DEPENDENCIES = 'async'
 ECS_TASK_NAME = APP_NAME + 'Task'

 # Constants (OS specific)
 USER = os.environ['HOME'].split('/')[-1]
-AWS_BUCKET = USER + BUCKET_POSTFIX
+AWS_BUCKET = APP_NAME + '-' + BUCKET_POSTFIX
 AWS_CONFIG_FILE_NAME = os.environ['HOME'] + '/.aws/config'
 AWS_CREDENTIAL_FILE_NAME = os.environ['HOME'] + '/.aws/credentials'
@@ -62,7 +62,7 @@
 SSH_USER = 'ec2-user'
 CPU_SHARES = 512 # POV-Ray needs at least half a CPU to work nicely.
 MEMORY = 512
-ZIPFILE_NAME = LAMBDA_FUNCTION_NAME + '.zip'
+ZIPFILE_NAME = LAMBDA_FUNCTION_SUBDIR + '.zip'

 BUCKET_PERMISSION_SID = APP_NAME + 'Permission'
 WAIT_TIME = 5 # seconds to allow for eventual consistency to kick in.
@@ -106,12 +106,12 @@
 }

 LAMBDA_FUNCTION_CONFIG = {
-    "s3_key_suffix_whitelist": ['.zip'], # Only S3 keys with this URL will be accepted.
+    "s3_key_suffix_whitelist": ['.beer'], # Only S3 keys with this URL will be accepted.
     "queue": '', # To be filled in with the queue ARN.
     "task": ECS_TASK_NAME
 }

-LAMBDA_FUNCTION_CONFIG_PATH = './' + LAMBDA_FUNCTION_NAME + '/config.json'
+LAMBDA_FUNCTION_CONFIG_PATH = './' + LAMBDA_FUNCTION_SUBDIR + '/config.json'

 BUCKET_NOTIFICATION_CONFIGURATION = {
     "LambdaFunctionConfigurations": [
@@ -132,9 +132,16 @@
         {
             "Effect": "Allow",
             "Action": [
-                "s3:ListAllMyBuckets"
+                "s3:ListAllMyBuckets",
+                "sqs:ReceiveMessage",
+                "sqs:DeleteMessage"
             ],
-            "Resource": "arn:aws:s3:::*"
+            "Resource": [
+                "arn:aws:s3:::*",
+                "arn:aws:sqs:*:*:*"
+            ]
         },
         {
             "Effect": "Allow",
@@ -249,7 +256,7 @@

 def update_dependencies():
     local('pip2 install -r requirements.txt')
-    local('cd ' + LAMBDA_FUNCTION_NAME + '; npm install ' + LAMBDA_FUNCTION_DEPENDENCIES)
+    local('cd ' + LAMBDA_FUNCTION_SUBDIR + '; npm install ' + LAMBDA_FUNCTION_DEPENDENCIES)


 def get_aws_credentials():
@@ -272,6 +279,7 @@ def dump_lambda_function_configuration():
     print('Writing config for Lambda function...')
     lambda_function_config = LAMBDA_FUNCTION_CONFIG.copy()
     lambda_function_config['queue'] = get_queue_url()
+    lambda_function_config['cluster'] = ECS_CLUSTER
     with open(LAMBDA_FUNCTION_CONFIG_PATH, 'w') as fp:
         fp.write(json.dumps(lambda_function_config))

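Reviewer note: with the cluster key added, the config.json that dump_lambda_function_configuration() writes for the Lambda function would look roughly like this (queue URL and account ID illustrative):

    {
        "s3_key_suffix_whitelist": [".beer"],
        "queue": "https://queue.amazonaws.com/123456789012/ecs-test-plepeskaQueue",
        "task": "ecs-test-plepeskaTask",
        "cluster": "peter"
    }
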
@@ -280,7 +288,7 @@ def create_lambda_deployment_package():
     print('Creating ZIP file: ' + ZIPFILE_NAME + '...')
     with ZipFile(ZIPFILE_NAME, 'w', ZIP_DEFLATED) as z:
         saved_dir = os.getcwd()
-        os.chdir(LAMBDA_FUNCTION_NAME)
+        os.chdir(LAMBDA_FUNCTION_SUBDIR)
         for root, dirs, files in os.walk('.'):
             for basename in files:
                 filename = os.path.join(root, basename)
@@ -385,7 +393,7 @@ def update_lambda_function():
             ' --function-name ' + LAMBDA_FUNCTION_NAME +
             ' --zip-file fileb://./' + ZIPFILE_NAME +
             ' --role ' + role_arn +
-            ' --handler ' + LAMBDA_FUNCTION_NAME + '.handler' +
+            ' --handler ' + LAMBDA_FUNCTION_SUBDIR + '.handler' +
             ' --runtime nodejs' +
             AWS_CLI_STANDARD_OPTIONS,
             capture=True
@@ -411,6 +419,7 @@ def get_s3_connection():
 def get_or_create_bucket():
     s3 = get_s3_connection()
     b = s3.lookup(AWS_BUCKET)
+    print(b)
     if b is None:
         print('Creating bucket: ' + AWS_BUCKET + ' in region: ' + AWS_REGION + '...')
         LOCATION = AWS_REGION if AWS_REGION != 'us-east-1' else ''
@@ -555,7 +564,7 @@ def get_container_instances():
     result = json.loads(local(
         'aws ecs list-container-instances' +
         ' --query containerInstanceArns' +
-        ' --cluster ' + ECS_CLUSTER +
+        ' --cluster ' + ECS_CLUSTER +
         AWS_CLI_STANDARD_OPTIONS,
         capture=True
     ))
@@ -588,7 +597,6 @@ def prepare_env():
     env.user = SSH_USER
     env.key_filename = SSH_KEY_DIR + '/' + SSH_KEY_NAME

-
 def generate_dockerfile():
     return DOCKERFILE % {'name': FULL_NAME_AND_EMAIL, 'worker_file': WORKER_FILE}
@@ -615,6 +623,7 @@ def update_ecs_image():
     login_str = local('aws ecr get-login', capture=True)
     print(login_str)
     run('%s' % login_str)
+    run('docker images')
     run('docker push ' + DOCKERHUB_TAG)

     # Cleanup.
@@ -638,7 +647,6 @@ def show_task_definition():

 def update_ecs_task_definition():
     task_definition_string = json.dumps(generate_task_definition())
-
     local(
         'aws ecs register-task-definition' +
         ' --family ' + ECS_TASK_NAME +