This content originally appeared on DEV Community and was authored by Salam Shaik
Hi everyone,
This article helps you build a chatbot that can suggest movies based on your prompt, provide you with the movie details, and maintain context throughout the chat session.
The AWS services I have used to build this solution are:
AWS Bedrock — Nova Pro model — Converse API
Lambda
API Gateway
EC2 for running an Elastic Search container
S3 for static web hosting
CloudFront for CDN
Route 53 for DNS
CloudWatch for Logging
DynamoDB for storing Sessions and Data
Infrastructure Overview of the Chatbot Platform
Let’s start the implementation. I divided this infrastructure into 3 parts
Front-end deployment
API Layer
Backend Services
Deploying Front-end services:
- Created an S3 bucket with the domain name I have and enabled S3 static web hosting from the properties of the bucket
- Bucket Policy for static web hosting
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "PublicReadGetObject",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::chitrangi.cloudnirvana.in/*"
}
]
}
- Requested an SSL certificate from the Certificate Manager for the domain chitrangi.cloudnirvana.in
- Created a distribution in the CloudFront with this certificate and an S3 bucket hosting
- Created a record for the subdomain in Route 53 and pointed it to CloudFront
- Now everything is ready, let’s upload the index.html file to the S3 bucket. Here is the code for the HTML file
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>🎬 Chitrangi - Movie Chatbot</title>
<!-- All styling is inlined so the page deploys as a single S3 object. -->
<style>
/* Page backdrop and base font. */
body {
  font-family: 'Segoe UI', sans-serif;
  background-color: #f2f2f2;
  margin: 0;
  padding: 0;
}
/* Centered card that holds the whole chat UI. */
#chat-container {
  width: 90%;
  max-width: 800px;
  margin: 30px auto;
  background: #fff;
  border-radius: 8px;
  box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
  padding: 20px;
  position: relative; /* anchor for the absolutely-positioned reset button */
}
h2 {
  margin: 0 0 5px 0;
}
/* Small grey line showing the current session id. */
#session-info {
  font-size: 13px;
  color: #666;
  margin-bottom: 10px;
}
/* Green capability banner at the top of the card. */
#info-box {
  background-color: #e9f7ef;
  border-left: 5px solid #28a745;
  padding: 10px 15px;
  margin-bottom: 15px;
  border-radius: 5px;
  font-size: 14px;
}
/* Scrollable transcript area. */
#messages {
  height: 500px;
  overflow-y: scroll;
  padding: 10px;
  border: 1px solid #ddd;
  border-radius: 5px;
  background-color: #f9f9f9;
}
/* Shared chat-bubble look; fadeIn runs once per appended message. */
.message {
  margin: 10px 0;
  padding: 10px 15px;
  border-radius: 18px;
  max-width: 75%;
  display: inline-block;
  word-wrap: break-word;
  animation: fadeIn 0.3s ease;
}
/* User bubbles: blue, right-aligned. */
.user {
  background-color: #007bff;
  color: white;
  margin-left: auto;
  text-align: right;
}
/* Bot bubbles: pale green, left-aligned. */
.bot {
  background-color: #e0ffe0;
  color: #333;
  text-align: left;
  margin-right: auto;
}
/* Three-dot "typing" bubble shown while waiting for the API. */
.typing-indicator-bubble {
  background-color: #e0ffe0;
  color: #333;
  padding: 10px 15px;
  border-radius: 18px;
  max-width: 150px;
  margin: 10px 0;
  display: flex;
  gap: 4px;
  justify-content: center;
  align-items: center;
}
.dot {
  width: 8px;
  height: 8px;
  background-color: #888;
  border-radius: 50%;
  animation: bounce 1.4s infinite;
}
/* Stagger the dots so they bounce in sequence. */
.dot:nth-child(2) {
  animation-delay: 0.2s;
}
.dot:nth-child(3) {
  animation-delay: 0.4s;
}
@keyframes bounce {
  0%, 80%, 100% { transform: scale(0); }
  40% { transform: scale(1); }
}
/* Input row pinned under the transcript. */
#input-container {
  margin-top: 15px;
  display: flex;
  gap: 10px;
}
#input {
  flex: 1;
  padding: 10px;
  font-size: 16px;
  border-radius: 5px;
  border: 1px solid #ccc;
}
#send {
  padding: 10px 20px;
  font-size: 16px;
  background: #007bff;
  color: white;
  border: none;
  border-radius: 5px;
  cursor: pointer;
}
#send:hover {
  background: #0056b3;
}
/* Red "Start New Chat" button in the card's top-right corner. */
#reset {
  position: absolute;
  top: 20px;
  right: 20px;
  background: #dc3545;
  color: white;
  border: none;
  padding: 8px 16px;
  border-radius: 5px;
  cursor: pointer;
}
#reset:hover {
  background: #a71d2a;
}
@keyframes fadeIn {
  from { opacity: 0; transform: translateY(10px); }
  to { opacity: 1; transform: translateY(0); }
}
</style>
</head>
<body>
<div id="chat-container">
<h2>🎬 Chitrangi - Movie Chatbot</h2>
<!-- Session id is filled in by the page script. -->
<div id="session-info">Session ID: <code id="session-id"></code></div>
<div id="info-box">
<strong>What Chitrangi can do:</strong>
<ul style="margin: 5px 0 0 15px;">
<li>🎥 Suggest movies by genre or mood</li>
<li>📖 Provide movie details like synopsis and cast</li>
<li>🚫 Will not answer non-movie questions</li>
</ul>
</div>
<!-- resetSession() is defined in the page script. -->
<button id="reset" onclick="resetSession()">Start New Chat</button>
<div id="messages"></div>
<div id="input-container">
<input type="text" id="input" placeholder="Ask for a movie, like 'Suggest a thriller'...">
<button id="send">Send</button>
</div>
</div>
<script>
  // Replace with your API Gateway endpoint before deploying.
  const apiUrl = "";

  // One session id per browser, persisted so context survives reloads.
  let sessionId = localStorage.getItem("chitrangi_session_id");
  if (!sessionId) {
    sessionId = crypto.randomUUID();
    localStorage.setItem("chitrangi_session_id", sessionId);
  }
  document.getElementById("session-id").textContent = sessionId;

  // Start a fresh conversation: new id, empty transcript.
  function resetSession() {
    localStorage.removeItem("chitrangi_session_id");
    sessionId = crypto.randomUUID();
    localStorage.setItem("chitrangi_session_id", sessionId);
    document.getElementById("session-id").textContent = sessionId;
    document.getElementById("messages").innerHTML = "";
  }

  const messagesDiv = document.getElementById('messages');
  const inputField = document.getElementById('input');
  const sendButton = document.getElementById('send');

  // Escape HTML special characters so user/model text can never inject markup.
  function escapeHtml(text) {
    return text
      .replace(/&/g, '&amp;')
      .replace(/</g, '&lt;')
      .replace(/>/g, '&gt;')
      .replace(/"/g, '&quot;');
  }

  // Append a plain-text chat bubble (textContent — no injection risk here).
  function appendMessage(text, sender) {
    const div = document.createElement('div');
    div.classList.add('message', sender);
    div.textContent = text;
    const wrapper = document.createElement('div');
    wrapper.style.display = 'flex';
    wrapper.style.justifyContent = sender === 'user' ? 'flex-end' : 'flex-start';
    wrapper.appendChild(div);
    messagesDiv.appendChild(wrapper);
    messagesDiv.scrollTop = messagesDiv.scrollHeight;
  }

  // Show the animated three-dot bubble while the API call is in flight.
  function showTypingBubble() {
    const typingBubble = document.createElement('div');
    typingBubble.classList.add('typing-indicator-bubble');
    typingBubble.id = 'typing-bubble';
    typingBubble.innerHTML = `<div class="dot"></div><div class="dot"></div><div class="dot"></div>`;
    const wrapper = document.createElement('div');
    wrapper.style.display = 'flex';
    wrapper.style.justifyContent = 'flex-start';
    wrapper.appendChild(typingBubble);
    messagesDiv.appendChild(wrapper);
    messagesDiv.scrollTop = messagesDiv.scrollHeight;
  }

  function removeTypingBubble() {
    const typingBubble = document.getElementById('typing-bubble');
    if (typingBubble && typingBubble.parentElement) typingBubble.parentElement.remove();
  }

  // Render a bot message; "1. Title" list lines become bolded bullets.
  async function typeMessage(text, sender) {
    const wrapper = document.createElement('div');
    wrapper.style.display = 'flex';
    wrapper.style.justifyContent = sender === 'user' ? 'flex-end' : 'flex-start';
    const div = document.createElement('div');
    div.classList.add('message', sender);
    div.style.whiteSpace = 'pre-wrap';
    // FIX: escape first. The original assigned raw model output to innerHTML,
    // so any markup returned by the model (or an attacker in the middle)
    // would have been parsed and rendered as live HTML.
    div.innerHTML = escapeHtml(text).replace(/(?<=\n|^)\d+\.\s(.*)/g, (_, title) => `• <strong>${title}</strong>`);
    wrapper.appendChild(div);
    messagesDiv.appendChild(wrapper);
    messagesDiv.scrollTop = messagesDiv.scrollHeight;
  }

  // POST the prompt + session id to the Lambda-backed API and render the reply.
  async function sendMessage() {
    const userInput = inputField.value.trim();
    if (!userInput) return;
    appendMessage(userInput, 'user');
    inputField.value = '';
    showTypingBubble();
    try {
      const response = await fetch(apiUrl, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ message: userInput, session_id: sessionId })
      });
      const data = await response.json();
      removeTypingBubble();
      await typeMessage(data.response, 'bot');
    } catch (err) {
      removeTypingBubble();
      appendMessage('Error fetching response. Check your server.', 'bot');
    }
  }

  sendButton.addEventListener('click', sendMessage);
  // 'keydown' replaces the deprecated 'keypress' event.
  inputField.addEventListener('keydown', (e) => {
    if (e.key === 'Enter') sendMessage();
  });
</script>
</body>
</html>
Note: Replace the API URL with the API Gateway endpoint, which we are going to deploy in the next step
Deploying API Layer:
Create a Lambda function, Chitrangi, with Python runtime
Visit the API Gateway and create an HTTP API and integrate with the Lambda function you created, and make sure CORS is configured correctly
Note: Lambda is the heart of this system. Will provide the code at the end of this article. Before that, let’s deploy the backend services
Deploying Backend Services:
We need the following 3 services ready to work with Lambda
Dynamo DB
Bedrock
EC2 — runs Elastic Search
Dynamo DB:
Visit Dynamo DB and create 2 tables, one of which holds the movie data
Another table for holding the session data
For chatbot_sessions, keep the session_id as the Partition key and timestamp as the Sort Key
For imdb_movies, keep the movie_id as the Partition Key
Let’s dump the data in the imdb_movies table. I am using this dataset from Kaggle for this chatbot https://www.kaggle.com/datasets/ashpalsingh1525/imdb-movies-dataset
Use this script to dump this data into DynamoDB
import boto3
from botocore.exceptions import ClientError
import pandas as pd

# Load the Kaggle IMDB dataset and keep only the columns the chatbot needs.
file_path = "imdb_movies.csv"
df = pd.read_csv(file_path)
df = df[["orig_title", "overview", "genre", "crew"]]
# Rows missing any required field cannot be embedded or searched later, so drop them.
modified_df = df.dropna(subset=["orig_title", "overview", "genre", "crew"])
print(modified_df.head())

# Initialize DynamoDB resource
dynamodb = boto3.resource('dynamodb', region_name='ap-south-1')  # e.g., 'us-east-1'
table = dynamodb.Table('imdb_movies')

# batch_writer groups puts into 25-item BatchWriteItem calls and retries
# unprocessed items automatically -- far fewer round trips than one
# put_item request per row.
try:
    with table.batch_writer() as batch:
        for index, row in modified_df.iterrows():
            movie = {
                'movie_id': index + 1,  # dataframe index as a stable numeric key
                'title': row['orig_title'],
                'description': row['overview'],
                'genre': row['genre'],
                'crew': row['crew'],
            }
            batch.put_item(Item=movie)
            print(f"Queued: {movie['title']}")
except ClientError as e:
    print(f"Failed to insert batch: {e.response['Error']['Message']}")
BedRock:
We need two models for this chatbot—one for converting this data into vectors and one for handling the prompts. I am using these 2 models from the bedrock
Titan Text Embeddings V2 — for generating vectors for the data we stored in DynamoDB
Nova Pro — for handling the user prompts
Note: Please request access to these models to proceed further
EC2 — Elastic search container:
The reason for using EC2 to run Elasticsearch instead of the OpenSearch service is that I will be working on this on and off for several days. I need to be able to turn the service off to avoid extra charges when I am not working, and bring it back immediately when I need it. Creating an OpenSearch domain takes a lot of time, and a domain cannot be stopped and started the way an EC2 instance can.
To run Elastic Search container in the EC2 instance, please follow this documentation https://www.elastic.co/docs/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic
Instance configuration is t3.large and 16GB of storage, with the port open for Elastic Search is 9200
Dumping data to Elastic Search:
- Use the following script to read data from the Dynamo DB and convert it into vectors, and dump it into the Elastic Search Index
import boto3
import json
import requests
from requests.auth import HTTPBasicAuth
from elasticsearch import Elasticsearch, helpers

# Config
region = "ap-south-1"  # Change to your region
dynamodb = boto3.resource("dynamodb", region_name=region)
table = dynamodb.Table("imdb_movies")
bedrock = boto3.client("bedrock-runtime", region_name=region)
# Self-managed Elasticsearch on EC2; self-signed certs, so verification is off.
es = Elasticsearch(
    hosts=["https://elastic search endpoint:9200"],  # Replace with your Elasticsearch endpoint
    basic_auth=("elastic", "password"),  # Basic auth if enabled
    verify_certs=False
)
index_name = "imdb_movies"

# Function to embed text using Titan
def get_titan_embedding(text):
    """Return the Titan Text Embeddings V2 vector for *text*."""
    payload = {
        "inputText": text
    }
    response = bedrock.invoke_model(
        modelId="amazon.titan-embed-text-v2:0",
        body=json.dumps(payload),
        contentType="application/json"
    )
    embedding = json.loads(response['body'].read())['embedding']
    return embedding

actions = []
last_evaluated_key = None
total_items = 0
# A DynamoDB scan returns at most ~1 MB per page; follow LastEvaluatedKey
# until the table is exhausted.
while True:
    if last_evaluated_key:
        response = table.scan(ExclusiveStartKey=last_evaluated_key)
    else:
        response = table.scan()
    items = response.get('Items', [])
    total_items += len(items)
    print(f"Fetched {len(items)} items. Total fetched: {total_items}")
    for item in items:
        desc = item['description']
        # One Bedrock call per movie: the description becomes the vector
        # that powers cosine-similarity search in Elasticsearch.
        embedding = get_titan_embedding(desc)
        action = {
            "_index": index_name,
            "_id": item['movie_id'],
            "_source": {
                "movie_id": item['movie_id'],
                "title": item['title'],
                "description": desc,
                "embedding": embedding,
                "genre": item.get('genre', ""),
                "crew": item.get('crew', ""),
            }
        }
        actions.append(action)
        # Flush every 500 records to bound memory usage during the dump.
        if len(actions) >= 500:
            helpers.bulk(es, actions)
            print(f"Inserted 500 records to Elasticsearch.")
            actions.clear()
    last_evaluated_key = response.get('LastEvaluatedKey')
    if not last_evaluated_key:
        break
# Insert any remaining actions
if actions:
    helpers.bulk(es, actions)
    print(f"Inserted remaining {len(actions)} records to Elasticsearch.")
print(f"Successfully processed {total_items} records from DynamoDB.")
Now that we have everything ready. Let’s start coding in the Lambda function
Lambda function:
Let’s go through step by step here
When the user enters a prompt, that prompt, along with the session ID, will be sent to the Lambda
If there is a session with that session ID in DynamoDB, we will fetch the previous messages; if not, we will store the current message with the user role
def store_user_message(session_id, role, message):
    """Append one chat turn to the DynamoDB session table."""
    # Millisecond resolution keeps the sort key distinct for rapid messages.
    now_ms = int(time.time() * 1000)
    item = {
        'session_id': session_id,
        'timestamp': now_ms,
        'role': role,
        'message': message,
    }
    session_table.put_item(Item=item)
def get_session_history(session_id):
    """Return the session's stored turns, oldest first, in Converse format."""
    response = session_table.query(
        KeyConditionExpression=Key('session_id').eq(session_id),
        ScanIndexForward=True  # ascending timestamp order
    )
    # Each stored turn becomes {"role": ..., "content": [{"text": ...}]}.
    return [
        {"role": item["role"], "content": [{"text": item["message"]}]}
        for item in response['Items']
    ]
Then user prompt will be sent for the intent classification to the Nova model.
Here I am using the converse function of the Bedrock runtime instead of invoke_model to maintain context awareness
The intent classification prompt and calling the converse API
# Intent-classification system prompt: forces Nova to reply with exactly one
# JSON object and never chat. NOTE(review): the option list numbers "4." twice
# (gratitude and irrelevant); the text is a runtime string, so it is left
# untouched here.
classify_system_prompt = """You are a prompt classifier — not a chatbot. Your only job is to classify the user's **latest message**.
- Use previous messages **only as context** if the user prompt is vague.
- Do not react to previous assistant responses.
- Never generate movie suggestions or details. Do not respond like a chatbot.
Return exactly **one** JSON object matching one of the following:
1. If the user asks for movie details (e.g., who acted, synopsis, or info), return:
{"type": "ask_details", "title": "<movie name>"}
2. If the user is asking for new movie suggestions, return:
{"type": "suggestion", "genre": "<genre>", "mood": "<mood>"}
3. If the user greets (Hi, Hello, etc.), or say thanks return:
{"type": "greeting"}
4. If the user says thanks or thanks you or express his gratitude, return:
{"type":"gratitude"}
4. If the message is not related to movies, return:
{"type": "irrelevant"}
Important:
- Use only the **last user prompt** to determine classification.
- Use prior messages **only to resolve vague references** (like “that movie”).
- Respond ONLY with the JSON. Never generate answers, movie descriptions, or titles unless needed for classification.
"""
# Wrapped in the {"text": ...} shape the Converse API expects for system blocks.
classify_system_prompt_obj = {"text":classify_system_prompt}
def send_to_model(message, system_prompt, session_id, get_session_messages):
    """Send *message* to the Nova model through the Converse API.

    Args:
        message: text for the new user turn.
        system_prompt: Converse system block ({"text": ...}) or None.
        session_id: session whose stored history seeds the conversation.
        get_session_messages: when True, prepend the stored chat history so
            the model keeps context across turns.

    Returns:
        The first text content block of the model's reply.
    """
    previous_messages = []
    if get_session_messages:
        previous_messages = get_session_history(session_id)
    previous_messages.append({"role": "user", "content": [{"text": message}]})
    print(f"previous messages {previous_messages}")
    system_prompts = []
    if system_prompt is not None:
        system_prompts.append(system_prompt)
    # Only temperature is passed; the original's unused `top_k` local was
    # dropped (Converse takes topK via additionalModelRequestFields if needed).
    inference_config = {"temperature": 0.7}
    response = bedrock_nova.converse(
        modelId="model id",
        messages=previous_messages,
        system=system_prompts,
        inferenceConfig=inference_config
    )
    model_text = response["output"]["message"]["content"][0]["text"]
    print(f"model_text {model_text}")
    return model_text
Based on the classification received from the model, we will categorize each prompt and redirect to the respective task
5 Types of classification I have done here. They are:
greetings: If the user prompt represents a greeting kind of prompt, we will redirect them to generate a greeting response
ask_details: If the user is asking for movie details, we will redirect them to fetch movie details from Elastic Search and will generate a movie info response using the model
suggestions: If the user is asking for a movie suggestion, we will redirect them to the suggestion category, where suggestions will be fetched from the Elastic Search index and a movie suggestion response will be generated using the model
gratitude: If the user is expressing his happiness or says thank you, then this category will prompt and respond to the user using a model response
irrelevant: if the user is asking, which is not at all related to movie suggestions or details, will respond back to the user with a polite message saying it is irrelevant to the movies.
def route_prompt(prompt, prompt_type, session_id, json_dump):
    """Dispatch the classified prompt to the matching response builder.

    Args:
        prompt: raw user message.
        prompt_type: classification label produced by the Nova model.
        session_id: chat session identifier (DynamoDB partition key).
        json_dump: parsed classification JSON (may carry 'title', 'genre', ...).

    Returns:
        An API Gateway proxy response dict.
    """
    def _ok(text):
        # All success paths share the same proxy-response envelope.
        return {
            "statusCode": 200,
            "body": json.dumps({"response": text})
        }

    if prompt_type == "irrelevant":
        return _ok("Looks like your question is irrelevant to movies. I can help you with movie suggestions. Try asking about genres, actors, or moods!")
    elif prompt_type == "greeting":
        return _ok(build_greetings_response(prompt, session_id))
    elif prompt_type == "gratitude":
        return _ok(build_gratitude_response(prompt, session_id))
    elif prompt_type == "ask_details":
        print("Entered ask details")
        # FIX: call get_movie_details once. The original invoked it twice per
        # request, doubling the Elasticsearch lookup and the Bedrock call.
        details = get_movie_details(json_dump, prompt, session_id)
        if details is None:
            return _ok("I can't find the movie title in our movies list.")
        return _ok(details)
    elif prompt_type == "suggestion":
        # get_movie_suggestions already returns a full proxy response.
        return get_movie_suggestions(prompt, json_dump, session_id)
    else:
        return _ok("I am not sure what you are asking for")
- The prompts for generating respective category responses are here
# System prompt for greeting replies (the bot introduces itself as Chitrangi).
greeting_system_prompt = """
You are a movie chat bot named Chitrangi. Read the user query and prepare a polite response and respond to the user by introducting yourselft as chitrangi.
These are the things you can do. You can suggest movies. You can get a specific movie details. That's it. Nothing morethan that. Try to keep response in a single line
"""
# System prompt for thank-you replies.
gratitude_system_prompt = """
You are a movie chat bot named Chitrangi.
If the user expresses gratitude (e.g., says "Thanks", "Thank you", etc.), respond warmly, letting them know you're also happy to help. Try to keep response in a single line
"""
# System prompt for answering movie-detail questions strictly from the
# supplied movie_object.
movie_details_system_prompt = """
You are a movie chat bot. Based on user_query and movie_object, prepare a polite response and respond to the user.
Only consider the data from the user_query and movie_object. Don't hallucinate. Try to keep everything in a single line
"""
movie_details_system_prompt_obj = {"text":movie_details_system_prompt}
# Suggestion prompt: repeatedly forbids going outside the retrieved movies
# list to curb hallucination. NOTE(review): declared as an f-string but
# contains no placeholders.
movie_suggestion_system_prompt = f"""
You are a friendly movie chatbot that suggest movies based movies list provided. Movies list is final suggestions you need to provide. Nothing from the internet and no hallucinations.
Don't express your opinion on each movie in the response. Even though you feel the movies in the movies list are not good match for user query.
Your responsibility is to take the movies from the movies list and respond to the user based on user query.
Only suggest the movies from the given movies list. Mention every movie mentioned in the list
Don’t suggest anything outside the list. Don't hallucinate.
The movies list will contain the movie suggestions based on user query.
Don't suggest movies from internet. If the movies list is empty respond to the user that there are no suggestion right now
Respond like you are suggesting the movies from the list by yourself. But don't hallucinate and don't provide movie info from the internet. Try to keep entire response in a single line
"""
movie_suggestion_system_prompt_obj = {"text":movie_suggestion_system_prompt}
- I hope you got some clarity on how the Lambda function works with Elastic Search, DynamoDB, and Bedrock to respond back to the user
Note: The converse API is a very tricky one. Even though you give specific rules to follow, if it identifies any messages that break the given rules in the entire conversation, it will completely ignore the classification rules. Make sure to use it wisely.
The complete Lambda is here:
import json
import boto3
import os
from elasticsearch import Elasticsearch
import ast
from boto3.dynamodb.conditions import Key
import time
region = "ap-south-1"
# Two Bedrock runtime clients: Titan embeddings run in ap-south-1, while
# Nova Pro is reached through a us-east-1 inference profile.
bedrock = boto3.client("bedrock-runtime", region_name=region)
dynamodb = boto3.resource("dynamodb")
session_table = dynamodb.Table("chatbot_sessions")
bedrock_nova = boto3.client("bedrock-runtime", region_name='us-east-1')
# Self-managed Elasticsearch on EC2 with self-signed certs, hence
# verify_certs=False. NOTE(review): credentials are hard-coded placeholders --
# move them to environment variables or Secrets Manager before production.
es = Elasticsearch(
    hosts=["https://endpoint:9200"],
    basic_auth=("elastic", "passowrd"),
    verify_certs=False
)
index_name = "imdb_movies"
# Per-container memory of suggested movie ids / last title; it persists only
# while this Lambda execution environment stays warm.
session_memory = {}
session_id = ""
# Intent-classification system prompt: forces Nova to reply with exactly one
# JSON object and never chat. NOTE(review): the option list numbers "4." twice
# (gratitude and irrelevant); the text is a runtime string, so it is left
# untouched here.
classify_system_prompt = """You are a prompt classifier — not a chatbot. Your only job is to classify the user's **latest message**.
- Use previous messages **only as context** if the user prompt is vague.
- Do not react to previous assistant responses.
- Never generate movie suggestions or details. Do not respond like a chatbot.
Return exactly **one** JSON object matching one of the following:
1. If the user asks for movie details (e.g., who acted, synopsis, or info), return:
{"type": "ask_details", "title": "<movie name>"}
2. If the user is asking for new movie suggestions, return:
{"type": "suggestion", "genre": "<genre>", "mood": "<mood>"}
3. If the user greets (Hi, Hello, etc.), or say thanks return:
{"type": "greeting"}
4. If the user says thanks or thanks you or express his gratitude, return:
{"type":"gratitude"}
4. If the message is not related to movies, return:
{"type": "irrelevant"}
Important:
- Use only the **last user prompt** to determine classification.
- Use prior messages **only to resolve vague references** (like “that movie”).
- Respond ONLY with the JSON. Never generate answers, movie descriptions, or titles unless needed for classification.
"""
# Wrapped in the {"text": ...} shape the Converse API expects for system blocks.
classify_system_prompt_obj = {"text":classify_system_prompt}
# System prompt for greeting replies (the bot introduces itself as Chitrangi).
greeting_system_prompt = """
You are a movie chat bot named Chitrangi. Read the user query and prepare a polite response and respond to the user by introducting yourselft as chitrangi.
These are the things you can do. You can suggest movies. You can get a specific movie details. That's it. Nothing morethan that. Try to keep response in a single line
"""
# System prompt for thank-you replies.
gratitude_system_prompt = """
You are a movie chat bot named Chitrangi.
If the user expresses gratitude (e.g., says "Thanks", "Thank you", etc.), respond warmly, letting them know you're also happy to help. Try to keep response in a single line
"""
# System prompt for answering movie-detail questions strictly from the
# supplied movie_object.
movie_details_system_prompt = """
You are a movie chat bot. Based on user_query and movie_object, prepare a polite response and respond to the user.
Only consider the data from the user_query and movie_object. Don't hallucinate. Try to keep everything in a single line
"""
movie_details_system_prompt_obj = {"text":movie_details_system_prompt}
# Suggestion prompt: repeatedly forbids going outside the retrieved movies
# list to curb hallucination. NOTE(review): declared as an f-string but
# contains no placeholders.
movie_suggestion_system_prompt = f"""
You are a friendly movie chatbot that suggest movies based movies list provided. Movies list is final suggestions you need to provide. Nothing from the internet and no hallucinations.
Don't express your opinion on each movie in the response. Even though you feel the movies in the movies list are not good match for user query.
Your responsibility is to take the movies from the movies list and respond to the user based on user query.
Only suggest the movies from the given movies list. Mention every movie mentioned in the list
Don’t suggest anything outside the list. Don't hallucinate.
The movies list will contain the movie suggestions based on user query.
Don't suggest movies from internet. If the movies list is empty respond to the user that there are no suggestion right now
Respond like you are suggesting the movies from the list by yourself. But don't hallucinate and don't provide movie info from the internet. Try to keep entire response in a single line
"""
movie_suggestion_system_prompt_obj = {"text":movie_suggestion_system_prompt}
def store_user_message(session_id, role, message):
    """Append one chat turn to the DynamoDB session table."""
    # Millisecond resolution keeps the sort key distinct for rapid messages.
    now_ms = int(time.time() * 1000)
    item = {
        'session_id': session_id,
        'timestamp': now_ms,
        'role': role,
        'message': message,
    }
    session_table.put_item(Item=item)
def get_session_history(session_id):
    """Return the session's stored turns, oldest first, in Converse format."""
    response = session_table.query(
        KeyConditionExpression=Key('session_id').eq(session_id),
        ScanIndexForward=True  # ascending timestamp order
    )
    # Each stored turn becomes {"role": ..., "content": [{"text": ...}]}.
    return [
        {"role": item["role"], "content": [{"text": item["message"]}]}
        for item in response['Items']
    ]
# Embedding generator
def invoke_bedrock(model_id, payload):
    """Invoke a Bedrock model with a JSON payload and return the parsed body."""
    raw = bedrock.invoke_model(
        modelId=model_id,
        contentType="application/json",
        body=json.dumps(payload),
    )
    return json.loads(raw["body"].read())
def send_to_model(message, system_prompt, session_id, get_session_messages):
    """Send *message* to the Nova model through the Converse API.

    Args:
        message: text for the new user turn.
        system_prompt: Converse system block ({"text": ...}) or None.
        session_id: session whose stored history seeds the conversation.
        get_session_messages: when True, prepend the stored chat history so
            the model keeps context across turns.

    Returns:
        The first text content block of the model's reply.
    """
    previous_messages = []
    if get_session_messages:
        previous_messages = get_session_history(session_id)
    previous_messages.append({"role": "user", "content": [{"text": message}]})
    print(f"previous messages {previous_messages}")
    system_prompts = []
    if system_prompt is not None:
        system_prompts.append(system_prompt)
    # Only temperature is passed; the original's unused `top_k` local was
    # dropped (Converse takes topK via additionalModelRequestFields if needed).
    inference_config = {"temperature": 0.7}
    response = bedrock_nova.converse(
        modelId="arn:aws:bedrock:us-east-1:556343216872:inference-profile/us.amazon.nova-pro-v1:0",
        messages=previous_messages,
        system=system_prompts,
        inferenceConfig=inference_config
    )
    model_text = response["output"]["message"]["content"][0]["text"]
    print(f"model_text {model_text}")
    return model_text
#prompt classifier
def extract_intents_entities(prompt, session_id):
    """Classify the latest user prompt via the Nova model.

    Returns the model's raw classification text (expected to be a JSON
    object such as {"type": "suggestion", ...}); the caller parses it.
    """
    # The original wrapped only `return result` in try/except, which can never
    # raise -- the send_to_model call is the risky part. Letting its exceptions
    # propagate preserves the handler's existing 500 fallback behavior.
    return send_to_model(prompt, classify_system_prompt_obj, session_id, True)
#movies search
def search_movies(query_embedding, exclude_ids=None, top_k=5):
    """Vector-search the movie index by cosine similarity.

    Args:
        query_embedding: Titan embedding of the user's query.
        exclude_ids: movie ids already suggested this session, to be skipped.
        top_k: number of hits to return.

    Returns:
        A list of {"movie_id", "title", "description"} dicts, best match first.
    """
    must_not_clause = []
    if exclude_ids:
        # Skip movies already shown so repeat asks surface fresh titles.
        must_not_clause = [{"terms": {"movie_id": exclude_ids}}]
    # (The original also built an unused `must_clauses` list; removed.)
    search_query = {
        "size": top_k,
        "query": {
            "script_score": {
                "query": {
                    "bool": {
                        "must_not": must_not_clause
                    }
                },
                "script": {
                    # +1.0 shifts cosine similarity into [0, 2]; ES requires
                    # non-negative scores.
                    "source": "cosineSimilarity(params.query_vector, 'embedding') + 1.0",
                    "params": {"query_vector": query_embedding}
                }
            }
        }
    }
    results = es.search(index=index_name, body=search_query)
    return [{
        "movie_id": hit["_source"]["movie_id"],
        "title": hit["_source"]["title"],
        "description": hit["_source"]["description"]
    } for hit in results["hits"]["hits"]]
#return titan embedding
def get_titan_embedding(text):
    """Embed *text* with Titan Text Embeddings V2 and return the vector."""
    return invoke_bedrock("amazon.titan-embed-text-v2:0", {"inputText": text})["embedding"]
#nova suggestion response building
def build_response(prompt, movies, session_id):
    """Ask the model to phrase a suggestion reply from the retrieved titles."""
    # Only titles are handed to the model; descriptions stay server-side.
    titles = [m['title'] for m in movies if isinstance(m, dict) and "title" in m]
    suggestion_prompt = f"""
User query: {prompt}
Movies list: {titles}"""
    return send_to_model(suggestion_prompt, movie_suggestion_system_prompt_obj, session_id, True)
#nova movie details response building
def build_movie_details_response(user_query, movie_data, session_id):
    """Have the model phrase a details reply from the matched movie object."""
    details_prompt = f"""
user_query= {user_query}
movie_object= {movie_data}"""
    return send_to_model(details_prompt, movie_details_system_prompt_obj, session_id, True)
def build_greetings_response(user_query, session_id):
    """Generate Chitrangi's greeting/self-introduction reply."""
    # The greeting instructions are sent as the message itself, with no
    # separate system block and no prior session context.
    return send_to_model(greeting_system_prompt, None, session_id, False)
def build_gratitude_response(user_query, session_id):
    """Generate a warm reply to a thank-you message."""
    # Like the greeting path, the instructions travel as the message itself
    # and no session history is attached.
    return send_to_model(gratitude_system_prompt, None, session_id, False)
#movies suggestions fetch
def get_movie_suggestions(prompt, extracted, session_id):
    """Embed the classified query, search ES, and build a suggestion reply."""
    # Ids already shown this session are excluded so repeat asks surface
    # fresh titles; this state lives only while the container stays warm.
    memory = session_memory.get(session_id, {"suggested_movie_ids": []})
    already_seen = memory["suggested_movie_ids"]
    vector = get_titan_embedding(json.dumps(extracted))
    movies = search_movies(vector, exclude_ids=already_seen)
    fresh_ids = [m['movie_id'] for m in movies if 'movie_id' in m]
    session_memory[session_id] = {
        "suggested_movie_ids": already_seen + fresh_ids
    }
    reply = build_response(prompt, movies, session_id)
    return {
        "statusCode": 200,
        "body": json.dumps({"response": reply})
    }
#movie details fetch
def get_movie_details(extracted, user_query, session_id):
    """Look up one movie by title and build a details reply.

    Args:
        extracted: classification JSON; 'title' may be absent/empty for vague
            follow-ups like "who acted in that movie?".
        user_query: raw user prompt.
        session_id: used to remember the last title asked about.

    Returns:
        A model-written details string, or an apology string when the title
        is missing or unknown. Note: this never actually returns None.
    """
    title = extracted.get('title', '').strip()
    # fallback to session memory title (warm-container state only)
    if not title:
        title = session_memory.get(session_id, {}).get("last_title", "")
        print(f"Fallback to session stored title: {title}")
    if not title:
        return "I can't find the movie title. Please provide a specific movie title."
    # update session with this title
    session_data = session_memory.get(session_id, {})
    session_data["last_title"] = title
    session_memory[session_id] = session_data
    # Plain text match on the title field (not the vector index).
    response = es.search(index=index_name, body={
        "query": {
            "match": {
                "title": title
            }
        }
    })
    if response["hits"]["hits"]:
        # Take the best-scoring hit and strip it down to the fields the
        # details prompt is allowed to use.
        movie = response["hits"]["hits"][0]["_source"]
        movie_object = {
            "title": movie.get("title", ""),
            "description": movie.get("description", ""),
            "genre": movie.get("genre", ""),
            "crew": movie.get("crew", "")
        }
        return build_movie_details_response(user_query, movie_object, session_id)
    else:
        return "I can't find the movie title in our movies list."
def route_prompt(prompt, prompt_type, session_id, json_dump):
    """Dispatch the classified prompt to the matching response builder.

    Args:
        prompt: raw user message.
        prompt_type: classification label produced by the Nova model.
        session_id: chat session identifier (DynamoDB partition key).
        json_dump: parsed classification JSON (may carry 'title', 'genre', ...).

    Returns:
        An API Gateway proxy response dict.
    """
    def _ok(text):
        # All success paths share the same proxy-response envelope.
        return {
            "statusCode": 200,
            "body": json.dumps({"response": text})
        }

    if prompt_type == "irrelevant":
        return _ok("Looks like your question is irrelevant to movies. I can help you with movie suggestions. Try asking about genres, actors, or moods!")
    elif prompt_type == "greeting":
        return _ok(build_greetings_response(prompt, session_id))
    elif prompt_type == "gratitude":
        return _ok(build_gratitude_response(prompt, session_id))
    elif prompt_type == "ask_details":
        print("Entered ask details")
        # FIX: call get_movie_details once. The original invoked it twice per
        # request, doubling the Elasticsearch lookup and the Bedrock call.
        details = get_movie_details(json_dump, prompt, session_id)
        if details is None:
            return _ok("I can't find the movie title in our movies list.")
        return _ok(details)
    elif prompt_type == "suggestion":
        # get_movie_suggestions already returns a full proxy response.
        return get_movie_suggestions(prompt, json_dump, session_id)
    else:
        return _ok("I am not sure what you are asking for")
# ------------------ HANDLER FUNCTION ------------------
def lambda_handler(event, context):
    """API Gateway entry point: classify the prompt, then route it.

    Expects a JSON body with 'message' and 'session_id'. Any failure
    (bad JSON, missing keys, model/ES errors) collapses into a generic
    500 response.
    """
    try:
        body = json.loads(event["body"])
        prompt = body.get("message", "")
        session_id = body.get("session_id")
        print(f"prompt and session id {prompt} and {session_id}")
        store_user_message(session_id, "user", prompt)
        extracted = extract_intents_entities(prompt, session_id)
        # NOTE(review): the raw classification JSON is stored as the assistant
        # turn, so it becomes part of the Converse context on later calls --
        # confirm this is intended given the article's note about the Converse
        # API being sensitive to prior messages.
        store_user_message(session_id, "assistant", extracted)
        # Raises ValueError if the model returned anything but pure JSON,
        # which falls through to the 500 below.
        json_dump = json.loads(extracted)
        prompt_type = json_dump['type']
        print(f"prompt_type {prompt_type}")
        return route_prompt(prompt, prompt_type, session_id, json_dump)
    except Exception as e:
        print(f"Error: {e}")
        return {
            "statusCode": 500,
            "body": json.dumps({"response": "Sorry I can't understand your query"})
        }
I hosted my chatbot at the link below. Give it a try and see how it’s working. If you have any suggestions or get stuck anywhere, please feel free to comment. I am open to suggestions.
I named my chatbot Chitrangi. Yeah, I know it’s a weird name. Got it from the old Telugu movies
Chitrangi
Chitrangi will be live for 3 days — after that, the EC2 instance goes down, and my AWS bill gets to breathe again
Thanks for reading.. Have a great day…
This content originally appeared on DEV Community and was authored by Salam Shaik