Skip to content

GitHub issue #194: Separate task deployment for API and event bus consumers (#195)

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Oct 25, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 17 additions & 18 deletions deploy.sh
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ AWS_ECS_CONTAINER_NAME="tc-project-service"
AWS_REPOSITORY=$(eval "echo \$${ENV}_AWS_REPOSITORY")
AWS_ECS_CLUSTER=$(eval "echo \$${ENV}_AWS_ECS_CLUSTER")
AWS_ECS_SERVICE=$(eval "echo \$${ENV}_AWS_ECS_SERVICE")
AWS_ECS_SERVICE_CONSUMERS=$(eval "echo \$${ENV}_AWS_ECS_SERVICE_CONSUMERS")
AUTH_DOMAIN=$(eval "echo \$${ENV}_AUTH_DOMAIN")
AUTH_SECRET=$(eval "echo \$${ENV}_AUTH_SECRET")
VALID_ISSUERS=$(eval "echo \$${ENV}_VALID_ISSUERS")
Expand All @@ -29,9 +30,9 @@ configure_aws_cli() {
# deploys the app to the ecs cluster
deploy_cluster() {

make_task_def
register_definition
if [[ $(aws ecs update-service --cluster $AWS_ECS_CLUSTER --service $AWS_ECS_SERVICE --task-definition $revision | \
make_task_def $1 $2 $3 $4
register_definition $1
if [[ $(aws ecs update-service --cluster $AWS_ECS_CLUSTER --service $1 --task-definition $revision | \
$JQ '.service.taskDefinition') != $revision ]]; then
echo "Error updating service."
return 1
Expand All @@ -46,6 +47,7 @@ make_task_def(){
"family": "%s",
"requiresCompatibilities": ["EC2", "FARGATE"],
"networkMode": "awsvpc",
"taskRoleArn": "arn:aws:iam::%s:role/tc-project-service-ecs-task-role",
"executionRoleArn": "arn:aws:iam::%s:role/ecsTaskExecutionRole",
"cpu": "1024",
"memory": "2048",
Expand All @@ -56,6 +58,7 @@ make_task_def(){
"essential": true,
"memory": 1536,
"cpu": 768,
"entryPoint": ["%s", "%s", "%s"],
"environment": [
{
"name": "NODE_ENV",
Expand Down Expand Up @@ -85,14 +88,6 @@ make_task_def(){
"name": "AWS_REGION",
"value": "%s"
},
{
"name": "AWS_ACCESS_KEY_ID",
"value": "%s"
},
{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "%s"
},
{
"name": "AUTH_DOMAIN",
"value": "%s"
Expand Down Expand Up @@ -253,12 +248,12 @@ make_task_def(){
KAFKA_URL=$(eval "echo \$${ENV}_KAFKA_URL")


task_def=$(printf "$task_template" $family $ACCOUNT_ID $AWS_ECS_CONTAINER_NAME $ACCOUNT_ID $AWS_REGION $AWS_REPOSITORY $CIRCLE_SHA1 $NODE_ENV $ENABLE_FILE_UPLOAD $LOG_LEVEL $CAPTURE_LOGS $LOGENTRIES_TOKEN $API_VERSION $AWS_REGION $AWS_ACCESS_KEY_ID $AWS_SECRET_ACCESS_KEY $AUTH_DOMAIN $AUTH_SECRET $VALID_ISSUERS $DB_MASTER_URL $MEMBER_SERVICE_ENDPOINT $IDENTITY_SERVICE_ENDPOINT $BUS_API_URL $MESSAGE_SERVICE_URL $SYSTEM_USER_CLIENT_ID $SYSTEM_USER_CLIENT_SECRET $PROJECTS_ES_URL $PROJECTS_ES_INDEX_NAME $RABBITMQ_URL $DIRECT_PROJECT_SERVICE_ENDPOINT $FILE_SERVICE_ENDPOINT $CONNECT_PROJECTS_URL $SEGMENT_ANALYTICS_KEY "$AUTH0_URL" "$AUTH0_AUDIENCE" $AUTH0_CLIENT_ID "$AUTH0_CLIENT_SECRET" $TOKEN_CACHE_TIME "$KAFKA_CLIENT_CERT" "$KAFKA_CLIENT_CERT_KEY" $KAFKA_GROUP_ID $KAFKA_URL $PORT $PORT $AWS_ECS_CLUSTER $AWS_REGION $NODE_ENV)
task_def=$(printf "$task_template" $1 $ACCOUNT_ID $ACCOUNT_ID $AWS_ECS_CONTAINER_NAME $ACCOUNT_ID $AWS_REGION $AWS_REPOSITORY $CIRCLE_SHA1 $2 $3 $4 $NODE_ENV $ENABLE_FILE_UPLOAD $LOG_LEVEL $CAPTURE_LOGS $LOGENTRIES_TOKEN $API_VERSION $AWS_REGION $AUTH_DOMAIN $AUTH_SECRET $VALID_ISSUERS $DB_MASTER_URL $MEMBER_SERVICE_ENDPOINT $IDENTITY_SERVICE_ENDPOINT $BUS_API_URL $MESSAGE_SERVICE_URL $SYSTEM_USER_CLIENT_ID $SYSTEM_USER_CLIENT_SECRET $PROJECTS_ES_URL $PROJECTS_ES_INDEX_NAME $RABBITMQ_URL $DIRECT_PROJECT_SERVICE_ENDPOINT $FILE_SERVICE_ENDPOINT $CONNECT_PROJECTS_URL $SEGMENT_ANALYTICS_KEY "$AUTH0_URL" "$AUTH0_AUDIENCE" $AUTH0_CLIENT_ID "$AUTH0_CLIENT_SECRET" $TOKEN_CACHE_TIME "$KAFKA_CLIENT_CERT" "$KAFKA_CLIENT_CERT_KEY" $KAFKA_GROUP_ID $KAFKA_URL $PORT $PORT $AWS_ECS_CLUSTER $AWS_REGION $NODE_ENV)
}

# Pushes the Docker image built by CI to the given ECR repository.
# The scraped diff showed both the old hard-coded push ($AWS_REPOSITORY) and
# the new parameterized one ($1); only the final parameterized form is kept,
# otherwise the image would be pushed twice.
# Arguments:
#   $1 - ECR repository name to push to
# Globals (read): ACCOUNT_ID, AWS_REGION, CIRCLE_SHA1
push_ecr_image() {
  # Legacy ECR login flow (aws ecr get-login emits a docker login command).
  eval "$(aws ecr get-login --region "$AWS_REGION" --no-include-email)"
  docker push "$ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$1:$CIRCLE_SHA1"
}

register_definition() {
Expand All @@ -273,13 +268,13 @@ register_definition() {
check_service_status() {
counter=0
sleep 60
servicestatus=`aws ecs describe-services --service $AWS_ECS_SERVICE --cluster $AWS_ECS_CLUSTER | $JQ '.services[].events[0].message'`
servicestatus=`aws ecs describe-services --service $1 --cluster $AWS_ECS_CLUSTER | $JQ '.services[].events[0].message'`
while [[ $servicestatus != *"steady state"* ]]
do
echo "Current event message : $servicestatus"
echo "Waiting for 30 seconds to check the service status...."
sleep 30
servicestatus=`aws ecs describe-services --service $AWS_ECS_SERVICE --cluster $AWS_ECS_CLUSTER | $JQ '.services[].events[0].message'`
servicestatus=`aws ecs describe-services --service $1 --cluster $AWS_ECS_CLUSTER | $JQ '.services[].events[0].message'`
counter=`expr $counter + 1`
if [[ $counter -gt $COUNTER_LIMIT ]] ; then
echo "Service does not reach steady state within 10 minutes. Please check"
Expand All @@ -290,6 +285,10 @@ check_service_status() {
}

# Main deployment flow.
# The scraped diff interleaved the pre-change unparameterized calls with the
# new parameterized ones; only the final merged sequence is kept (running both
# would deploy twice with inconsistent arguments).
# Build artifacts are pushed once, then two ECS services are rolled out from
# the same image: the API ("npm run start") and the Kafka event-bus consumers
# ("npm run startKafkaConsumers"); finally both are polled until steady state.
configure_aws_cli
push_ecr_image "$AWS_REPOSITORY"

deploy_cluster "$AWS_ECS_SERVICE" "npm" "run" "start"
deploy_cluster "$AWS_ECS_SERVICE_CONSUMERS" "npm" "run" "startKafkaConsumers"

check_service_status "$AWS_ECS_SERVICE"
check_service_status "$AWS_ECS_SERVICE_CONSUMERS"
3 changes: 2 additions & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
"migrate:es": "./node_modules/.bin/babel-node migrations/seedElasticsearchIndex.js",
"prestart": "npm run -s build",
"start": "node dist",
"startKafkaConsumers": "npm run -s build && node dist/index-kafka.js",
"start:dev": "NODE_ENV=development PORT=8001 nodemon -w src --exec \"babel-node src --presets es2015\" | ./node_modules/.bin/bunyan",
"test": "NODE_ENV=test npm run lint && NODE_ENV=test npm run sync:es && NODE_ENV=test npm run sync:db && NODE_ENV=test ./node_modules/.bin/istanbul cover ./node_modules/mocha/bin/_mocha -- --timeout 10000 --compilers js:babel-core/register $(find src -path '*spec.js*')",
"test:watch": "NODE_ENV=test ./node_modules/.bin/mocha -w --compilers js:babel-core/register $(find src -path '*spec.js*')",
Expand Down Expand Up @@ -48,7 +49,7 @@
"express-request-id": "^1.1.0",
"express-sanitizer": "^1.0.2",
"express-validation": "^0.6.0",
"http-aws-es": "^1.1.3",
"http-aws-es": "^4.0.0",
"joi": "^8.0.5",
"jsonwebtoken": "^8.3.0",
"lodash": "^4.16.4",
Expand Down
51 changes: 51 additions & 0 deletions src/index-kafka.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import _ from 'lodash';
import config from 'config';
import startKafkaConsumer from './services/kafkaConsumer';
import { kafkaHandlers } from './events';
import models from './models';

const coreLib = require('tc-core-library-js');


// =======================
// Logger =========
// =======================
// Suffix the app name with the environment so log streams are distinguishable.
// Guard against NODE_ENV being unset: the original called
// process.env.NODE_ENV.toLowerCase() directly, which throws a TypeError on
// startup when the variable is missing. Defaulting to 'production' preserves
// the switch's existing `default:` (-prod) behavior.
let appName = 'tc-projects-consumer';
switch ((process.env.NODE_ENV || 'production').toLowerCase()) {
  case 'development':
    appName += '-dev';
    break;
  case 'qa':
    appName += '-qa';
    break;
  case 'production':
  default:
    appName += '-prod';
    break;
}

const logger = coreLib.logger({
  name: appName,
  level: _.get(config, 'logLevel', 'debug').toLowerCase(),
  captureLogs: config.get('captureLogs'),
  logentriesToken: _.get(config, 'logentriesToken', null),
});

// =======================
// Database =========
// =======================
// Importing the models module registers them as a side effect; log that it loaded.
logger.info('Registering models ... ', !!models);

/**
 * Handle server shutdown gracefully.
 * Registered for SIGTERM/SIGINT. It must terminate the process itself:
 * installing a handler replaces Node's default termination behavior, so an
 * empty body (as in the original TODO stub) would swallow both signals and
 * leave the consumer unkillable by its orchestrator.
 * @returns {undefined}
 */
function gracefulShutdown() {
  // TODO(review): drain in-flight Kafka messages / close connections before exiting.
  process.exit(0);
}
process.on('SIGTERM', gracefulShutdown);
process.on('SIGINT', gracefulShutdown);

const app = { logger, models };

module.exports = startKafkaConsumer(kafkaHandlers, app, logger);
12 changes: 6 additions & 6 deletions src/services/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@

import config from 'config';
import RabbitMQService from './rabbitmq';
import startKafkaConsumer from './kafkaConsumer';
import { kafkaHandlers } from '../events';
// import startKafkaConsumer from './kafkaConsumer';
// import { kafkaHandlers } from '../events';

/**
* Responsible for establishing connections to all external services
Expand Down Expand Up @@ -33,10 +33,10 @@ module.exports = (fapp, logger) => {
.then(() => {
logger.info('RabbitMQ service initialized');
})
.then(() => startKafkaConsumer(kafkaHandlers, app, logger))
.then(() => {
logger.info('Kafka consumer service initialized');
})
// .then(() => startKafkaConsumer(kafkaHandlers, app, logger))
// .then(() => {
// logger.info('Kafka consumer service initialized');
// })
.catch((err) => {
logger.error('Error initializing services', err);
// gracefulShutdown()
Expand Down
10 changes: 5 additions & 5 deletions src/util.js
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ import config from 'config';
import urlencode from 'urlencode';
import elasticsearch from 'elasticsearch';
import Promise from 'bluebird';
import AWS from 'aws-sdk';
// import AWS from 'aws-sdk';

import { ADMIN_ROLES, TOKEN_SCOPES } from './constants';

Expand Down Expand Up @@ -317,10 +317,10 @@ _.assignIn(util, {
apiVersion: config.get('elasticsearchConfig.apiVersion'),
hosts: esHost,
connectionClass: require('http-aws-es'), // eslint-disable-line global-require
amazonES: {
region: 'us-east-1',
credentials: new AWS.EnvironmentCredentials('AWS'),
},
// amazonES: {
// region: 'us-east-1',
// credentials: new AWS.EnvironmentCredentials('AWS'),
// },
});
} else {
esClient = new elasticsearch.Client(_.cloneDeep(config.elasticsearchConfig));
Expand Down