From ba65220127d113fb4ab54cd1afae7ab28d3a7efa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Seif=20Lotfy=20=D8=B3=D9=8A=D9=81=20=D9=84=D8=B7=D9=81?=
 =?UTF-8?q?=D9=8A?=
Date: Fri, 11 Nov 2016 19:40:22 +0100
Subject: [PATCH] Lambda docs (#264)

* Add aws.md and s3 example

Signed-off-by: Seif Lotfy

* minor fix
---
 docs/lambda/aws.md               | 114 +++++++++++++++++++++++++++++++
 docs/lambda/import.md            |  11 ++-
 examples/s3/Dockerfile           |   5 ++
 examples/s3/Makefile             |   7 ++
 examples/s3/README.md            |   2 +
 examples/s3/example-payload.json |   5 ++
 examples/s3/example.js           |  70 +++++++++++++++++++
 7 files changed, 211 insertions(+), 3 deletions(-)
 create mode 100644 docs/lambda/aws.md
 create mode 100644 examples/s3/Dockerfile
 create mode 100644 examples/s3/Makefile
 create mode 100644 examples/s3/README.md
 create mode 100644 examples/s3/example-payload.json
 create mode 100644 examples/s3/example.js

diff --git a/docs/lambda/aws.md b/docs/lambda/aws.md
new file mode 100644
index 000000000..b8fd69c23
--- /dev/null
+++ b/docs/lambda/aws.md
@@ -0,0 +1,114 @@
+Interacting with AWS Services
+=============================
+
+The node.js and Python stacks include SDKs to interact with other AWS
+services. For Java you will need to include any such SDK in the JAR file.
+
+## Credentials
+
+Running Lambda functions outside of AWS means that we cannot automatically
+get access to other AWS resources based on Lambda assuming the execution
+role specified with the function. Instead, when using the AWS APIs inside
+your Lambda function (for example, to access S3 buckets), you will need to
+pass credentials explicitly.
+
+### Using environment variables for the credentials
+
+The easiest way to do this is to pass the `AWS_ACCESS_KEY_ID` and
+`AWS_SECRET_ACCESS_KEY` environment variables when creating or importing
+the Lambda function from AWS.
+
+This can be done as follows:
+
+```sh
+export aws_access_key_id=<access-key-id>
+export aws_secret_access_key=<secret-access-key>
+
+./fnctl lambda create-function /s3 nodejs example.run examples/s3/example.js examples/s3/example-payload.json --config aws_access_key_id --config aws_secret_access_key
+```
+
+or
+
+```sh
+./fnctl lambda create-function /s3 nodejs example.run ../../lambda/examples/s3/example.js ../../lambda/examples/s3/example-payload.json --config aws_access_key_id=<access-key-id> --config aws_secret_access_key=<secret-access-key>
+```
+
+The various AWS SDKs will automatically pick these up.
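+
+If your setup does not pick the values up automatically, you can also hand
+them to the client explicitly. A minimal sketch (assuming the configuration
+values above are exposed to the function as environment variables under the
+same names):
+
+```js
+var AWS = require('aws-sdk');
+
+// Build the S3 client with explicit credentials read from the environment
+// instead of relying on the SDK's automatic credential lookup.
+var s3 = new AWS.S3({
+  accessKeyId: process.env.aws_access_key_id,
+  secretAccessKey: process.env.aws_secret_access_key
+});
+```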
+
+## Example: Reading and writing to an S3 bucket
+
+This example demonstrates modifying S3 buckets and using the included
+ImageMagick tools in a node.js function. Our function will fetch an image
+stored in a key specified by the event, resize it to a width of 1024px and
+save it to another key.
+
+The code for this example is located [here](../../examples/s3/example.js).
+
+The event will look like:
+
+```js
+{
+  "bucket": "iron-lambda-demo-images",
+  "srcKey": "waterfall.jpg",
+  "dstKey": "waterfall-1024.jpg"
+}
+```
+
+The setup: imports and SDK initialization.
+
+```js
+var im = require('imagemagick');
+var fs = require('fs');
+var AWS = require('aws-sdk');
+
+exports.run = function(event, context) {
+  var bucketName = event['bucket'];
+  var srcImageKey = event['srcKey'];
+  var dstImageKey = event['dstKey'];
+
+  var s3 = new AWS.S3();
+};
+```
+
+First we retrieve the source image and write it to a local file so that
+ImageMagick can work with it.
+
+```js
+s3.getObject({
+  Bucket: bucketName,
+  Key: srcImageKey
+}, function (err, data) {
+  if (err) throw err;
+
+  var fileSrc = '/tmp/image-src.dat';
+  var fileDst = '/tmp/image-dst.dat';
+  fs.writeFileSync(fileSrc, data.Body);
+});
+```
+
+The actual resizing uses `im.identify` to get the current size (we only
+resize if the image is wider than 1024px), then performs the conversion to
+`fileDst`. Finally we upload the result to S3.
+
+```js
+im.identify(fileSrc, function(err, features) {
+  resizeIfRequired(err, features, fileSrc, fileDst, function(err, resized) {
+    if (err) throw err;
+    if (resized) {
+      s3.putObject({
+        Bucket: bucketName,
+        Key: dstImageKey,
+        Body: fs.createReadStream(fileDst),
+        ContentType: 'image/jpeg',
+        ACL: 'public-read'
+      }, function (err, data) {
+        if (err) throw err;
+        context.succeed("Image updated");
+      });
+    } else {
+      context.succeed("Image not updated");
+    }
+  });
+});
+```
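+
+Note that in the complete [example.js](../../examples/s3/example.js) these
+two blocks are nested rather than sequential: the image must be on disk
+before it can be inspected, so the `im.identify` call runs inside the
+`s3.getObject` callback. The overall control flow, structure only:
+
+```js
+s3.getObject({ Bucket: bucketName, Key: srcImageKey }, function (err, data) {
+  // 1. Write data.Body to fileSrc.
+  im.identify(fileSrc, function (err, features) {
+    // 2. Resize to fileDst if wider than 1024px, then upload via s3.putObject.
+  });
+});
+```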
\ No newline at end of file
diff --git a/docs/lambda/import.md b/docs/lambda/import.md
index d16fb6dea..ef88ef788 100644
--- a/docs/lambda/import.md
+++ b/docs/lambda/import.md
@@ -36,8 +36,7 @@ Assuming you have a lambda with the following arn `arn:aws:lambda:us-west-2:1231
 fnctl lambda aws-import arn:aws:lambda:us-west-2:123141564251:function:my-function us-east-1 user/my-function
 ```
 
-will import the function code from the region `us-east-1` to a directory called `./my-function`. It will
-then create a docker image called `my-function`.
+will import the function code from the region `us-east-1` to a directory called `./user/my-function`.
 Inside the directory you will find the `function.yml`, `Dockerfile`, and all the files needed for running the function.
 
 Using Lambda with Docker Hub and IronFunctions requires that the Docker image be named `<Docker Hub username>/<function name>`. This is used to uniquely identify
@@ -47,4 +46,10 @@ name>` as the image name with `aws-import` to create a correctly named image.
 
 If you only want to download the code, pass the `--download-only` flag. The `--profile` flag is available similar to the `aws` tool to help you tweak the settings on a command level.
 Finally, you can import a different version of your lambda function than the latest one
-by passing `--version <version>`.
\ No newline at end of file
+by passing `--version <version>`.
+
+You can then publish the imported lambda as follows:
+```sh
+./fnctl publish -d ./user/my-function
+```
+Now the function can be reached via `http://$HOSTNAME/r/user/my-function`.
diff --git a/examples/s3/Dockerfile b/examples/s3/Dockerfile
new file mode 100644
index 000000000..528e257bf
--- /dev/null
+++ b/examples/s3/Dockerfile
@@ -0,0 +1,5 @@
+FROM iron/lambda-nodejs
+
+ADD example.js ./example.js
+
+CMD ["example.run"]
diff --git a/examples/s3/Makefile b/examples/s3/Makefile
new file mode 100644
index 000000000..188e7429d
--- /dev/null
+++ b/examples/s3/Makefile
@@ -0,0 +1,7 @@
+IMAGE=iron/lambda-node-aws-example
+
+create: Dockerfile
+	docker build -t $(IMAGE) .
+
+test:
+	docker run --rm -it -e PAYLOAD_FILE=/mnt/example-payload.json -e AWS_ACCESS_KEY_ID=change-here -e AWS_SECRET_ACCESS_KEY=change-here -v `pwd`:/mnt $(IMAGE)
diff --git a/examples/s3/README.md b/examples/s3/README.md
new file mode 100644
index 000000000..a0f93cbf6
--- /dev/null
+++ b/examples/s3/README.md
@@ -0,0 +1,2 @@
+Example of how to use AWS S3 in a lambda function.
+
diff --git a/examples/s3/example-payload.json b/examples/s3/example-payload.json
new file mode 100644
index 000000000..a832d6eca
--- /dev/null
+++ b/examples/s3/example-payload.json
@@ -0,0 +1,5 @@
+{
+  "bucket": "iron-lambda-demo-images",
+  "srcKey": "waterfall.jpg",
+  "dstKey": "waterfall-1024.jpg"
+}
diff --git a/examples/s3/example.js b/examples/s3/example.js
new file mode 100644
index 000000000..2f4c3deef
--- /dev/null
+++ b/examples/s3/example.js
@@ -0,0 +1,70 @@
+var im = require('imagemagick');
+var fs = require('fs');
+var AWS = require('aws-sdk');
+
+// cb(err, resized) is called with resized === true if the image was resized.
+function resizeIfRequired(err, features, fileSrc, fileDst, cb) {
+  if (err) {
+    cb(err, false);
+    return;
+  }
+
+  var targetWidth = 1024;
+  if (features.width > targetWidth) {
+    im.resize({
+      srcPath: fileSrc,
+      dstPath: fileDst,
+      width: targetWidth,
+      format: 'jpg'
+    }, function(err) {
+      if (err) {
+        cb(err, false);
+      } else {
+        cb(null, true);
+      }
+    });
+  } else {
+    cb(null, false);
+  }
+}
+
+exports.run = function(event, context) {
+  var bucketName = event['bucket'];
+  var srcImageKey = event['srcKey'];
+  var dstImageKey = event['dstKey'];
+
+  var s3 = new AWS.S3();
+
+  // Download the source image and write it to local disk so ImageMagick
+  // can work with it.
+  s3.getObject({
+    Bucket: bucketName,
+    Key: srcImageKey
+  }, function (err, data) {
+    if (err) throw err;
+
+    var fileSrc = '/tmp/image-src.dat';
+    var fileDst = '/tmp/image-dst.dat';
+    fs.writeFileSync(fileSrc, data.Body);
+
+    // Inspect the image, resize it if it is wider than 1024px, and upload
+    // the result.
+    im.identify(fileSrc, function(err, features) {
+      resizeIfRequired(err, features, fileSrc, fileDst, function(err, resized) {
+        if (err) throw err;
+        if (resized) {
+          s3.putObject({
+            Bucket: bucketName,
+            Key: dstImageKey,
+            Body: fs.createReadStream(fileDst),
+            ContentType: 'image/jpeg',
+            ACL: 'public-read'
+          }, function (err, data) {
+            if (err) throw err;
+            context.succeed("Image updated");
+          });
+        } else {
+          context.succeed("Image not updated");
+        }
+      });
+    });
+  });
+};
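
To try the example locally, one possible flow is to use the Makefile targets added above, substituting real AWS credentials for the `change-here` placeholders in the `test` target:

```sh
# Build the example image, then run it once against the sample payload.
make create
make test
```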