Add index provisioning support #2

Open · wants to merge 9 commits into master

8 changes: 7 additions & 1 deletion .gitignore
@@ -1,4 +1,10 @@
*desktop.ini
*.jar
.netbeans*
target/*/
target/*/
*~
.DS_Store
.idea
*.swp
*.swo
*.iml
16 changes: 14 additions & 2 deletions README.md
@@ -1,5 +1,17 @@
# lambdaDynamodbScaler
An AWS Lambda function to scale DynamoDB tables on an hourly schedule
An AWS Lambda function to scale DynamoDB tables and global secondary indexes on an hourly schedule

More info can be found
[here](https://medium.com/@quodlibet_be/using-aws-lambda-scheduled-tasks-to-scale-dynamodb-c65a336aca4f).

More info can be found here : https://medium.com/@quodlibet_be/using-aws-lambda-scheduled-tasks-to-scale-dynamodb-c65a336aca4f


To deploy with [Serverless](http://serverless.com):

npm install -g serverless

sls deploy
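
The serverless.yml further down in this diff takes its stage from the --stage option and falls back to dev, so a production deploy is one extra flag. A sketch, assuming the shaded jar has been built first:

    mvn clean package
    sls deploy --stage prod
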
14 changes: 7 additions & 7 deletions pom.xml
@@ -7,30 +7,30 @@
<packaging>jar</packaging>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.7</maven.compiler.source>
<maven.compiler.target>1.7</maven.compiler.target>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
</properties>
<dependencies>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-dynamodb</artifactId>
<version>1.10.16</version>
<version>1.11.147</version>
</dependency>

<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-cloudwatch</artifactId>
<version>1.10.16</version>
<version>1.11.147</version>
</dependency>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-lambda-java-core</artifactId>
<version>1.0.0</version>
<version>1.1.0</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.10</version>
<version>4.12</version>
<scope>test</scope>
</dependency>
</dependencies>
@@ -42,7 +42,7 @@
<version>2.3</version>
<configuration>
<createDependencyReducedPom>false</createDependencyReducedPom>
<finalName>uber-${artifactId}-${version}</finalName>
<finalName>uber-${project.artifactId}-${project.version}</finalName>
</configuration>
<executions>
<execution>
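
The project.-prefixed expressions above replace Maven's deprecated bare ${artifactId}/${version} forms and keep the shaded jar's name predictable. Assuming the elided POM header declares artifactId LambdaDynamoDBScaler and version 1.0-SNAPSHOT (the path serverless.yml points at), the build step is:

    mvn clean package
    # emits target/uber-LambdaDynamoDBScaler-1.0-SNAPSHOT.jar
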
22 changes: 14 additions & 8 deletions scaler.properties
@@ -1,22 +1,28 @@
tablenames=example_scaler,example_scaler2
indexnames=example_scaler%index_one
#Table 1, changes 4 times per day
# from 01 to 07 - Low usage
01.example_scaler.read=2
01.example_scaler.write=2
#from 07 to 09 - Medium usage
05.example_scaler.read=6
05.example_scaler.write=6
#from 09 to 18 - High usage
07.example_scaler.read=20
07.example_scaler.write=20
#from 18 to 01 - Medium usage
07.example_scaler.read=6
07.example_scaler.write=6
#from 09 to 18 - High usage
09.example_scaler.read=20
09.example_scaler.write=20
#from 18 to 01 - Medium usage
18.example_scaler.read=6
18.example_scaler.write=6


#Table 1 Index 1 changes 2 times per day
#Index only has usage between 7 and 9
07.example_scaler%index_one.read=6
07.example_scaler%index_one.write=6
09.example_scaler%index_one.read=1
09.example_scaler%index_one.write=1

#Table 2, changes 2 times per day
#Table only as usage between 6 and 8
#Table only has usage between 6 and 8
05.example_scaler2.read=10
05.example_scaler2.write=10
09.example_scaler2.read=1
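
The key schema the handler parses, as exercised above: tablenames and indexnames are comma-separated lists, % joins a table to one of its global secondary indexes, and each HH.name.read / HH.name.write pair is applied when the function fires during hour HH. A minimal sketch for a hypothetical table orders with a GSI by_customer:

    tablenames=orders
    indexnames=orders%by_customer
    #Hypothetical: scale up at 08, back down at 20
    08.orders.read=50
    08.orders.write=25
    08.orders%by_customer.read=20
    08.orders%by_customer.write=10
    20.orders.read=5
    20.orders.write=5
    20.orders%by_customer.read=1
    20.orders%by_customer.write=1
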
60 changes: 60 additions & 0 deletions serverless.yml
@@ -0,0 +1,60 @@
service: lambdaDynamodbScaler

frameworkVersion: ">=1.2.0 <2.0.0"

custom:
  myStage: ${opt:stage, self:provider.stage}
  jarPath: target/uber-LambdaDynamoDBScaler-1.0-SNAPSHOT.jar
  bucket: my-bucket-lambda-dynamodb-scaler-${self:custom.myStage}
  dev:
    region: us-east-1
  prod:
    region: us-east-1


provider:
  name: aws
  stage: dev
  runtime: java8
  region: ${self:custom.${self:custom.myStage}.region}
  environment:
    STAGE: ${self:custom.myStage}
    BUCKETNAME: ${self:custom.bucket}
  iamRoleStatements:
    - Effect: Allow
      Action:
        - logs:CreateLogGroup
        - logs:CreateLogStream
        - logs:PutLogEvents
      Resource: "*"
    - Effect: Allow
      Action:
        - dynamodb:ListTables
        - dynamodb:DescribeTable
        - dynamodb:UpdateTable
      Resource: "*"
    - Effect: Allow
      Action:
        - s3:GetObject
      Resource: arn:aws:s3:::${self:custom.bucket}/*

package:
  artifact: ${self:custom.jarPath}

functions:
  scaler:
    handler: be.quodlibet.lambdadynamodbscaler.Scaler::scale
    memorySize: 128
    timeout: 30
    events:
      - schedule: rate(1 hour)

resources:
  Resources:
    ## Specifying the S3 Bucket
    ConfigS3Bucket:
      Type: AWS::S3::Bucket
      Properties:
        BucketName: ${self:custom.bucket}
        VersioningConfiguration:
          Status: Enabled
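
The handler loads scaler.properties from the versioned bucket this template creates, so the config has to be uploaded after the first deploy. A sketch with the AWS CLI, assuming the default dev stage and the bucket name produced by custom.bucket:

    aws s3 cp scaler.properties s3://my-bucket-lambda-dynamodb-scaler-dev/scaler.properties
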
121 changes: 96 additions & 25 deletions src/main/java/be/quodlibet/lambdadynamodbscaler/Scaler.java
@@ -1,11 +1,12 @@
package be.quodlibet.lambdadynamodbscaler;

import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.dynamodbv2.document.DynamoDB;
import com.amazonaws.services.dynamodbv2.document.Table;
import com.amazonaws.services.dynamodbv2.model.GlobalSecondaryIndexDescription;
import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput;
import com.amazonaws.services.dynamodbv2.model.TableDescription;
import com.amazonaws.services.lambda.runtime.Context;
@@ -15,29 +16,34 @@
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;

import java.io.IOException;
import java.util.Calendar;
import java.util.Objects;
import java.util.Optional;
import java.util.Properties;

/**
*
* @author Dries Horions <dries@quodlibet.be>
*/
public class Scaler
{
private static final String access_key_id = "ACCESSKEY";
private static final String secret_access_key = "SECRET";
private static final String configBucketName = "BUCKETNAME";
private static final String configBucketName = Optional
.ofNullable(System.getenv("BUCKETNAME"))
.orElse("default-bucket-name-replace");
private static final String configKey = "scaler.properties";
private static final Regions region = Regions.EU_WEST_1;
private static final Regions region = Regions.fromName(
Optional
.ofNullable(System.getenv("AWS_DEFAULT_REGION"))
.orElse("us-east-1")
);

private BasicAWSCredentials awsCreds;
private Properties ScalingProperties;
private AmazonS3 s3Client;
private AmazonDynamoDBClient clnt;
private DynamoDB dynamoDB;
private LambdaLogger log;

public Response scale(Object input, Context context)
{
if (context != null)
@@ -61,30 +67,70 @@ public Response scale(Object input, Context context)
String readProp = hour + "." + tableName + ".read";
String writeProp = hour + "." + tableName + ".write";
if (ScalingProperties.containsKey(readProp)
&& ScalingProperties.containsKey(writeProp))
&& ScalingProperties.containsKey(writeProp))
{
String readCapacity = (String) ScalingProperties.getProperty(readProp);
String writeCapacity = (String) ScalingProperties.getProperty(writeProp);
String readCapacity = ScalingProperties.getProperty(readProp);
String writeCapacity = ScalingProperties.getProperty(writeProp);
//Execute the scaling change
message += scaleTable(tableName, Long.parseLong(readCapacity), Long.parseLong(writeCapacity));
}
else
{
log("No values found for table " + tableName );
message += "\nNo values found for table " + tableName + "\n";
log(tableName + "\n No values found for table.\n");
message += tableName + "\n No values found for table.\n";
}
}
}
else
{
log("tables parameter not found in properties file");
log("tableNames parameter not found in properties file");
}
//Get the index names
if (ScalingProperties.containsKey("indexnames"))
{
String value = (String) ScalingProperties.get("indexnames");
String[] tableAndIndexNames = value.split(",");
for (String tableAndIndexName : tableAndIndexNames)
{
String[] split = tableAndIndexName.split("%");
if (split.length == 2)
{
String tableName = split[0];
String indexName = split[1];
//Check if there is a change requested for this hour
String readProp = hour + "." + tableAndIndexName + ".read";
String writeProp = hour + "." + tableAndIndexName + ".write";
if (ScalingProperties.containsKey(readProp) && ScalingProperties.containsKey(writeProp))
{
String readCapacity = ScalingProperties.getProperty(readProp);
String writeCapacity = ScalingProperties.getProperty(writeProp);
//Execute the scaling change
message += scaleIndex(tableName, indexName, Long.parseLong(readCapacity), Long.parseLong(writeCapacity));
}
else
{
log("No values found for index " + tableAndIndexName);
message += "\nNo values found for index " + tableAndIndexName + "\n";
}
}
else
{
message += tableAndIndexName + "\n Index name in wrong format (expected tableName%indexName).\n";
}
}
}
else
{
log("indexNames parameter not found in properties file");
}
log(message);
Response response = new Response(true, message);
return response;
}

/**
* Ensure we can also test this locally without context
*
* @param message
*/
private void log(String message)
@@ -107,48 +153,73 @@ private String scaleTable(String tableName, Long readCapacity, Long writeCapacity)
tp.setWriteCapacityUnits(writeCapacity);
TableDescription d = table.describe();
if (!Objects.equals(d.getProvisionedThroughput().getReadCapacityUnits(), readCapacity)
|| !Objects.equals(d.getProvisionedThroughput().getWriteCapacityUnits(), writeCapacity))
|| !Objects.equals(d.getProvisionedThroughput().getWriteCapacityUnits(), writeCapacity))
{
d = table.updateTable(tp);
return tableName + "\nRequested read/write : " + readCapacity + "/" + writeCapacity
+ "\nCurrent read/write :" + d.getProvisionedThroughput().getReadCapacityUnits() + "/" + d.getProvisionedThroughput().getWriteCapacityUnits()
+ "\nStatus : " + d.getTableStatus() + "\n";
+ "\nCurrent read/write :" + d.getProvisionedThroughput().getReadCapacityUnits() + "/"
+ d.getProvisionedThroughput().getWriteCapacityUnits() + "\nStatus : " + d.getTableStatus() + "\n";
}
else
{
return tableName + "\n Requested throughput equals current throughput\n";
}
}
private void setup()

private String scaleIndex(String tableName, String indexName, Long readCapacity, Long writeCapacity)
{
//Setup credentials
if (awsCreds == null)
Table table = dynamoDB.getTable(tableName);
ProvisionedThroughput tp = new ProvisionedThroughput();
tp.setReadCapacityUnits(readCapacity);
tp.setWriteCapacityUnits(writeCapacity);
TableDescription d = table.describe();
for (GlobalSecondaryIndexDescription indexDescription : d.getGlobalSecondaryIndexes())
{
awsCreds = new BasicAWSCredentials(access_key_id, secret_access_key);
if (Objects.equals(indexDescription.getIndexName(), indexName))
{
if (!Objects.equals(indexDescription.getProvisionedThroughput().getReadCapacityUnits(), readCapacity)
|| !Objects.equals(indexDescription.getProvisionedThroughput().getWriteCapacityUnits(), writeCapacity))
{
d = table.getIndex(indexName).updateGSI(tp);
return tableName + ":" + indexName + "\nRequested read/write : " + readCapacity + "/" + writeCapacity
+ "\nCurrent read/write :" + d.getProvisionedThroughput().getReadCapacityUnits() + "/"
+ d.getProvisionedThroughput().getWriteCapacityUnits() + "\nStatus : " + d.getTableStatus()
+ "\n";
}
else
{
return tableName + ":" + indexName + "\n Requested throughput equals current throughput.\n";
}
}
}
return tableName + ":" + indexName + "\n No index found.\n";
}

private void setup()
{
//Setup S3 client
if (s3Client == null)
{
s3Client = new AmazonS3Client(awsCreds);
s3Client = new AmazonS3Client();
}
//Setup DynamoDB client
if (clnt == null)
{
clnt = new AmazonDynamoDBClient(awsCreds);
clnt = new AmazonDynamoDBClient(new DefaultAWSCredentialsProviderChain());
dynamoDB = new DynamoDB(clnt);
clnt.setRegion(Region.getRegion(region));
}
//Load properties from S3
if (ScalingProperties == null)
{
try
{
try
{
ScalingProperties = new Properties();
S3Object object = s3Client.getObject(new GetObjectRequest(configBucketName, configKey));
S3ObjectInputStream stream = object.getObjectContent();
ScalingProperties.load(stream);
}
catch (IOException ex)
catch (IOException ex)
{
log("Failed to read config file : " + configBucketName + "/" + configKey + "(" + ex.getMessage() + ")");
}
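
Because scale() null-guards its Context and the log() helper exists, per its javadoc, so the function can be tested locally, the handler can be smoke-tested outside Lambda. A hypothetical runner, not part of this PR, assuming AWS credentials and BUCKETNAME are present in the environment:

    package be.quodlibet.lambdadynamodbscaler;

    /**
     * Hypothetical local runner: invokes the handler with a null Context,
     * which the null checks in scale() and log() tolerate.
     */
    public class LocalRun
    {
        public static void main(String[] args)
        {
            Scaler scaler = new Scaler();
            // input appears unused by scale() in this diff, so null should be fine
            Response response = scaler.scale(null, null);
            // Response's accessors are outside this diff, so just print it
            System.out.println(response);
        }
    }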