Automating Image Compression Using S3 & Lambda

For those who leverage images heavily, there are cases where you might want to serve compressed images instead of the originals to boost performance. For images stored on Amazon S3, it would be nice if you didn't have to compress them manually as they get uploaded. In this post, I'll show how you can automate the compression of your images in S3 using AWS Lambda and the ImageMagick Node.js library that's built into the Lambda runtime for Node.js.

S3 Image Compression

The following assumes you've already created the S3 buckets where your images will be managed. Feel free to use object key prefixes and suffixes to limit which uploads are processed, where applicable.
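
If you'd rather script the bucket setup than click through the console, here's a minimal sketch using the AWS SDK for Node.js. The bucket names and region are placeholders; bucket names must be globally unique, and regions other than us-east-1 also need a CreateBucketConfiguration.

    // Sketch: create the source and destination buckets with the AWS SDK for Node.js.
    // Bucket names and region below are placeholders; substitute your own.
    var AWS = require("aws-sdk");
    var s3 = new AWS.S3({ region: "us-east-1" });

    ["my-original-images", "my-compressed-images"].forEach(function(bucketName) {
        s3.createBucket({ Bucket: bucketName }, function(err, data) {
            if (err) {
                console.log(err, err.stack);
            } else {
                console.log("Created bucket:", data.Location);
            }
        });
    });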

Creating an IAM policy for access permissions:

  1. Navigate to IAM in your management console.
  2. Select “Policies” in the sidebar.
  3. Click “Create Policy”.
  4. Select “Create Your Own Policy”.
  5. Enter an appropriate policy name and description.
  6. Paste the following JSON into the policy document:
    {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": [
                    "s3:GetObject"
                ],
                "Resource": [
                    "Your Source Bucket ARN/*"
                ]
            },
            {
                "Effect": "Allow",
                "Action": [
                    "s3:PutObject"
                ],
                "Resource": [
                    "Your Destination Bucket ARN/*"
                ]
            }
        ]
    }
  7. Substitute “Your Source Bucket ARN” with the ARN for the S3 bucket that you’ll be uploading your original, uncompressed objects to. Make sure you add “/*” after the bucket ARN. For instance, if your bucket ARN was “arn:aws:s3:::sufferforyourbetrayal”, you would use “arn:aws:s3:::sufferforyourbetrayal/*”.
  8. Substitute “Your Destination Bucket ARN” with the ARN for the S3 bucket where you want your compressed objects to end up. Make sure you add “/*” after the bucket ARN. For instance, if your bucket ARN was “arn:aws:s3:::sufferforyourbetrayal”, you would use “arn:aws:s3:::sufferforyourbetrayal/*”.
  9. Click “Create Policy”.
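
If you prefer to script this step instead, the same policy can be created with the AWS SDK for Node.js. This is just a sketch; the policy name and bucket ARNs below are placeholders.

    // Sketch: create the policy above programmatically. Placeholders: policy name
    // and bucket ARNs.
    var AWS = require("aws-sdk");
    var iam = new AWS.IAM();

    var policyDocument = {
        Version: "2012-10-17",
        Statement: [
            { Effect: "Allow", Action: ["s3:GetObject"], Resource: ["arn:aws:s3:::my-original-images/*"] },
            { Effect: "Allow", Action: ["s3:PutObject"], Resource: ["arn:aws:s3:::my-compressed-images/*"] }
        ]
    };

    iam.createPolicy({
        PolicyName: "S3ImageCompressionPolicy",
        Description: "Read originals from the source bucket, write compressed copies to the destination bucket",
        PolicyDocument: JSON.stringify(policyDocument)
    }, function(err, data) {
        if (err) {
            console.log(err, err.stack);
        } else {
            console.log("Policy ARN:", data.Policy.Arn);
        }
    });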

Creating the IAM role for the Lambda function:

  1. Select “Roles” in the sidebar.
  2. Click “Create New Role”.
  3. Configure the role type so that it is an AWS Service Role for AWS Lambda, attach the policy you just created, give the role a name, and continue.
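
As with the policy, the role can be created programmatically. The sketch below creates a role that Lambda is trusted to assume and attaches the policy from the previous step; the role name, account ID, and policy ARN are placeholders. You may also want to attach the AWS-managed AWSLambdaBasicExecutionRole policy so the function can write logs to CloudWatch.

    // Sketch: create the execution role with a trust policy that lets Lambda assume
    // it, then attach the policy created above. Role name, account ID, and policy
    // ARN are placeholders.
    var AWS = require("aws-sdk");
    var iam = new AWS.IAM();

    var trustPolicy = {
        Version: "2012-10-17",
        Statement: [{
            Effect: "Allow",
            Principal: { Service: "lambda.amazonaws.com" },
            Action: "sts:AssumeRole"
        }]
    };

    iam.createRole({
        RoleName: "S3ImageCompressionRole",
        AssumeRolePolicyDocument: JSON.stringify(trustPolicy)
    }, function(err, data) {
        if (err) {
            console.log(err, err.stack);
            return;
        }
        console.log("Role ARN:", data.Role.Arn);
        iam.attachRolePolicy({
            RoleName: "S3ImageCompressionRole",
            PolicyArn: "arn:aws:iam::123456789012:policy/S3ImageCompressionPolicy"
        }, function(err) {
            if (err) {
                console.log(err, err.stack);
            } else {
                console.log("Policy attached.");
            }
        });
    });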

Creating the Lambda function:

  1. Navigate to Lambda in your management console.
  2. Click “Create a Lambda function”.
  3. Select the “Blank Function” blueprint.
  4. Under “Configure triggers”, click the grey box and select “S3”.
  5. Select the source bucket where original, uncompressed objects will be uploaded for the Bucket.
  6. Select the appropriate Event type. For example, “Put”.
  7. Enter a Prefix and/or Suffix if you want. I left mine blank.
  8. Check the box to “Enable trigger” and click “Next”.
  9. Click “Next”.
  10. Enter an appropriate function name and description. Select Node.js 6.10 for the runtime.
  11. Under “Lambda function code”, select “Edit code inline” for the Code entry type and paste the following code in the box:
    var AWS = require("aws-sdk");
    var IM = require('imagemagick');
    var FS = require('fs');

    // Quality settings between 0 and 1; PNGs are kept closer to lossless than JPEGs.
    var compressedJpegFileQuality = 0.80;
    var compressedPngFileQuality = 0.95;

    exports.handler = (event, context, callback) => {
        var s3 = new AWS.S3();
        var sourceBucket = "Source Bucket Name";
        var destinationBucket = "Destination Bucket Name";
        // S3 event notifications URL-encode object keys (spaces arrive as "+"),
        // so decode the key before using it.
        var objectKey = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
        var getObjectParams = {
            Bucket: sourceBucket,
            Key: objectKey
        };
        // Fetch the original object from the source bucket.
        s3.getObject(getObjectParams, function(err, data) {
            if (err) {
                console.log(err, err.stack);
                callback(err);
            } else {
                console.log("S3 object retrieval successful.");
                // /tmp is the only writable path in the Lambda environment.
                var resizedFileName = "/tmp/" + objectKey;
                var quality;
                if (resizedFileName.toLowerCase().includes("png")) {
                    quality = compressedPngFileQuality;
                } else {
                    quality = compressedJpegFileQuality;
                }
                // Keep the original dimensions; recompress, strip metadata, and
                // write progressive JPEGs.
                var resize_req = {
                    width: "100%",
                    height: "100%",
                    srcData: data.Body,
                    dstPath: resizedFileName,
                    quality: quality,
                    progressive: true,
                    strip: true
                };
                IM.resize(resize_req, function(err, stdout) {
                    if (err) {
                        console.log(err, err.stack);
                        return callback(err);
                    }
                    console.log('stdout:', stdout);
                    // readFileSync already returns a Buffer, so it can be uploaded as-is.
                    var content = FS.readFileSync(resizedFileName);
                    var uploadParams = {
                        Bucket: destinationBucket,
                        Key: objectKey,
                        Body: content,
                        ContentType: data.ContentType,
                        StorageClass: "STANDARD"
                    };
                    // Write the compressed copy to the destination bucket under the same key.
                    s3.upload(uploadParams, function(err) {
                        if (err) {
                            console.log(err, err.stack);
                            callback(err);
                        } else {
                            console.log("S3 compressed object upload successful.");
                            callback(null, "Compressed " + objectKey);
                        }
                    });
                });
            }
        });
    };
  12. Substitute “Source Bucket Name” with the name of the bucket the original, uncompressed objects will be uploaded to.
  13. Substitute “Destination Bucket Name” with the name of the bucket the compressed objects should end up in.
  14. Leave Handler as “index.handler”.
  15. Choose to use an existing role and select the IAM role you created earlier.
  16. Under the Advanced Settings, you may want to allocate additional memory or increase the timeout to fit your usage (these can also be adjusted after the fact; see the sketch after this list).
  17. Finish the wizard.
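
If you need to revisit the memory or timeout settings later, they can be changed without re-running the wizard. Here's a sketch with the AWS SDK for Node.js; the function name and values are placeholders, and larger images generally need more memory and time for ImageMagick to process.

    // Sketch: raise memory and timeout for an existing function.
    // Function name and values below are placeholders.
    var AWS = require("aws-sdk");
    var lambda = new AWS.Lambda();

    lambda.updateFunctionConfiguration({
        FunctionName: "compressS3Images",
        MemorySize: 512, // MB
        Timeout: 30      // seconds
    }, function(err, data) {
        if (err) {
            console.log(err, err.stack);
        } else {
            console.log("Updated configuration for", data.FunctionName);
        }
    });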

The complete, working Lambda script (with my bucket names filled in):

var AWS = require("aws-sdk");
var IM = require('imagemagick');
var FS = require('fs');
var compressedJpegFileQuality = 0.80;
var compressedPngFileQuality = 0.95;

exports.handler = (event, context, callback) => {
    var s3 = new AWS.S3();
    var sourceBucket = "stressed";
    var destinationBucket = "frustrated";
    var objectKey = event.Records[0].s3.object.key;

    var getObjectParams = {
        Bucket: sourceBucket,
        Key: objectKey
    };
    s3.getObject(getObjectParams, function(err, data) {
        if (err) {
            console.log(err, err.stack);
        } else {
            console.log("S3 object retrieval get successful.");
            var resizedFileName = "/tmp/" + objectKey;
            var quality;
            if (resizedFileName.toLowerCase().includes("png")){
                quality = compressedPngFileQuality;
            }
            else {
                quality = compressedJpegFileQuality;
            }
            var resize_req = { width:"100%", height:"100%", srcData:data.Body, dstPath: resizedFileName, quality: quality, progressive: true, strip: true };
            IM.resize(resize_req, function(err, stdout) {
                if (err) {
                    throw err;
                }
                console.log('stdout:', stdout);
                var content = new Buffer(FS.readFileSync(resizedFileName));
                var uploadParams = { Bucket: destinationBucket, Key: objectKey, Body: content, ContentType: data.ContentType, StorageClass: "STANDARD" };
                s3.upload(uploadParams, function(err, data) {
                    if (err) {
                        console.log(err, err.stack);
                    } else {
                        console.log("S3 compressed object upload successful.");
                    }
                });
            });
        }
    });
};
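
To sanity-check the pipeline end to end, upload a test image to the source bucket and then list the destination bucket after giving the trigger a few seconds to run. A quick sketch (the file name is a placeholder; the bucket names match the script above):

    // Sketch: upload a local test image to the source bucket, then list the
    // destination bucket after a short delay to see the compressed copy.
    var AWS = require("aws-sdk");
    var FS = require("fs");
    var s3 = new AWS.S3();

    s3.upload({
        Bucket: "stressed",
        Key: "test.jpg",
        Body: FS.readFileSync("test.jpg"),
        ContentType: "image/jpeg"
    }, function(err) {
        if (err) {
            console.log(err, err.stack);
            return;
        }
        console.log("Test image uploaded; checking the destination bucket shortly...");
        setTimeout(function() {
            s3.listObjectsV2({ Bucket: "frustrated" }, function(err, data) {
                if (err) {
                    console.log(err, err.stack);
                } else {
                    data.Contents.forEach(function(obj) {
                        console.log(obj.Key, obj.Size, "bytes");
                    });
                }
            });
        }, 10000);
    });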

 
