CDK stack (Python) — provisions the Docker-image Lambda function
from aws_cdk import (
    # Duration,
    Stack,
    aws_lambda as _lambda
    # aws_sqs as sqs,
)

from constructs import Construct

class FournewdockergenaiStack(Stack):
    """CDK stack that provisions a single Docker-image-backed Lambda function."""

    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        self.build_lambda_func()

    def build_lambda_func(self):
        """Create the Docker-image Lambda and expose it as ``self.prediction_lambda``."""
        # The image is built at deploy time from the Dockerfile found in this
        # directory (relative to where `cdk deploy` is executed).
        docker_code = _lambda.DockerImageCode.from_image_asset(
            directory="fournewdockergenai/ExampleDockerLambda"
        )
        self.prediction_lambda = _lambda.DockerImageFunction(
            scope=self,
            id="ExampleDockerLambda",
            # Function name as it appears in the AWS console
            function_name="ExampleDockerLambda",
            code=docker_code,
        )
  


Dockerfile (in fournewdockergenai/ExampleDockerLambda/, the directory the CDK stack builds from)
# AWS Lambda Python base image (includes the Lambda runtime interface client).
# NOTE(review): consider pinning a specific tag instead of :latest for
# reproducible builds — confirm which runtime version is expected.
FROM amazon/aws-lambda-python:latest

LABEL maintainer="RagEventGenAi"

# Install OS-level Python tooling in a single layer and drop the package
# cache so the layer (and the final image) stays small.
RUN dnf update -y && \
    dnf install -y python3 python3-pip && \
    dnf clean all

# Install Python dependencies before copying the function code so this layer
# is cached until requirements.txt changes; --no-cache-dir avoids shipping
# pip's download cache in the image.
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

# Copy the function itself last to take full advantage of Docker layer caching.
COPY example_docker_lambda.py ./

# Lambda entry point: module "example_docker_lambda", function "handler".
CMD ["example_docker_lambda.handler"]
  


requirements.txt (copied into the container image by the Dockerfile)
llama-index
llama-index-llms-bedrock
llama-index-embeddings-bedrock
  


Lambda handler module (example_docker_lambda.py)
    
import os
import json
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings, load_index_from_storage, StorageContext
from llama_index.llms.bedrock import Bedrock
from llama_index.embeddings.bedrock import BedrockEmbedding
import urllib.request
import time


# --- Cold-start initialization ----------------------------------------------
# Everything below runs once per Lambda container, so the vector index is
# built at cold start and reused across warm invocations.

# Bedrock models for embeddings and text generation (us-east-1).
Settings.embed_model = BedrockEmbedding(model="amazon.titan-embed-g1-text-02", region_name="us-east-1")
Settings.llm = Bedrock(model="amazon.titan-text-express-v1", region_name="us-east-1")

# RAG corpus: each document is fetched into /tmp (the only writable path in
# Lambda) and saved as 1.txt..11.txt so SimpleDirectoryReader picks them all up.
_RAG_BASE_URL = "https://d1z6izzrysvh92.cloudfront.net/ragdata/"
_RAG_FILES = [
    "Bolivia.txt",
    "ElectricPlants.txt",
    "Hurricane.txt",
    "IndiaSeismic.txt",
    "NatGasPIpeline.txt",  # spelling matches the object key on the CDN
    "OceanFloor.txt",
    "OilPipelines.txt",
    "PowerStations.txt",
    "SevereWeather.txt",
    "Tornado.txt",
    "WildFires.txt",
]
for _idx, _name in enumerate(_RAG_FILES, start=1):
    urllib.request.urlretrieve(_RAG_BASE_URL + _name, f"/tmp/{_idx}.txt")

time.sleep(1)  # brief pause so all downloads are settled before reading

# Load every file in /tmp and build an in-memory vector index over them.
documents = SimpleDirectoryReader("/tmp/").load_data()
index = VectorStoreIndex.from_documents(documents)


def handler(event, context):
    """Lambda entry point: answer a RAG query against the module-level index.

    Query string parameters (both optional):
        query: the question to ask (falls back to a sample question).
        model: a Bedrock model id to use as the LLM for this invocation.

    Returns an API-Gateway-style response dict whose body is JSON of the
    form {"text": ["<answer>"]}, with permissive CORS headers.
    """
    default_query = "Give me a feature service url for imagery in bolivia"

    # Pull the query from the request if present; fall back to the default.
    # KeyError/TypeError cover a missing key or a None queryStringParameters.
    try:
        query = str(event['queryStringParameters']['query'])
    except (KeyError, TypeError):
        query = default_query

    # Optionally swap in a caller-selected Bedrock model for this invocation.
    try:
        model = str(event['queryStringParameters']['model'])
        Settings.llm = Bedrock(model=model, region_name="us-east-1")
    except (KeyError, TypeError):
        print("no model")

    # Query the vector index that was built at cold start.
    query_engine = index.as_query_engine()
    response = query_engine.query(query)
    print(response)

    # Serialize with json.dumps instead of hand-concatenating JSON fragments,
    # so the body stays valid JSON regardless of the response content.
    body = json.dumps({"text": [str(response)]})
    return {
        'statusCode': 200,
        'body': body,
        'headers': {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': '*',
            'Content-type': 'application/json',
        },
    }