Commit
·
822ac98
1
Parent(s):
240a85a
Deploying new UI for AI messaging system
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .dockerignore +7 -7
- .gitignore +0 -8
- .idea/AI_Message_Generator.iml +4 -2
- .idea/misc.xml +1 -1
- .streamlit/config.toml +0 -3
- CIO/CIO_integration_Python.py +0 -146
- Config_files/message_system_config.json +0 -35
- Config_files/singeo_phrases.txt +0 -25
- Data/Singeo_Camp.csv +0 -2423
- Data/not_active_drumeo_camp.csv +0 -0
- Dockerfile +0 -37
- Messaging_system/Message_generator_2.py +0 -253
- Messaging_system/MultiMessage.py +0 -406
- Messaging_system/MultiMessage_2.py +0 -412
- Messaging_system/Permes.py +0 -202
- Messaging_system/PromptEng.py +0 -268
- Messaging_system/PromptGenerator_2.py +0 -446
- Messaging_system/SnowFlakeConnection.py +0 -262
- Messaging_system/context_validator.py +0 -302
- Messaging_system/protection_layer.py +0 -143
- Messaging_system/sending_time.py +0 -69
- README.md +0 -16
- Singeo_camp.csv +0 -0
- ai_messaging_system_v2/Data/test_camp.json +159 -0
- ai_messaging_system_v2/Data/test_staff.csv +11 -0
- ai_messaging_system_v2/Data/ui_output/.gitkeep +3 -0
- ai_messaging_system_v2/Data/ui_output/message_cost.csv +7 -0
- ai_messaging_system_v2/Data/ui_output/messages_a_drumeo_20260111_2039.csv +0 -0
- ai_messaging_system_v2/Data/ui_output/messages_b_drumeo_20260111_2039.csv +61 -0
- {Messaging_system → ai_messaging_system_v2/Messaging_system}/CoreConfig.py +27 -44
- {Messaging_system → ai_messaging_system_v2/Messaging_system}/DataCollector.py +264 -43
- {Messaging_system → ai_messaging_system_v2/Messaging_system}/Homepage_Recommender.py +0 -0
- {Messaging_system → ai_messaging_system_v2/Messaging_system}/LLM.py +226 -291
- {Messaging_system → ai_messaging_system_v2/Messaging_system}/LLMR.py +187 -33
- {Messaging_system → ai_messaging_system_v2/Messaging_system}/Message_generator.py +207 -90
- ai_messaging_system_v2/Messaging_system/Permes.py +412 -0
- {Messaging_system → ai_messaging_system_v2/Messaging_system}/PromptGenerator.py +112 -82
- ai_messaging_system_v2/Messaging_system/agents/README.md +518 -0
- ai_messaging_system_v2/Messaging_system/agents/__init__.py +20 -0
- ai_messaging_system_v2/Messaging_system/agents/agent_orchestrator.py +234 -0
- ai_messaging_system_v2/Messaging_system/agents/base_agent.py +82 -0
- ai_messaging_system_v2/Messaging_system/agents/generator_agent.py +470 -0
- ai_messaging_system_v2/Messaging_system/agents/rejection_logger.py +209 -0
- ai_messaging_system_v2/Messaging_system/agents/security_agent.py +459 -0
- ai_messaging_system_v2/README.md +489 -0
- ai_messaging_system_v2/UI_MODE_GUIDE.md +495 -0
- ai_messaging_system_v2/configs/README.md +363 -0
- Messaging_system/StoreLayer.py → ai_messaging_system_v2/configs/__init__.py +0 -0
- ai_messaging_system_v2/configs/config_loader.py +208 -0
- ai_messaging_system_v2/configs/drumeo/__init__.py +0 -0
.dockerignore
CHANGED
|
@@ -1,8 +1,8 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
.streamlit/secrets.toml
|
| 6 |
-
|
| 7 |
-
# Ignore the .env file
|
| 8 |
.env
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
**/__pycache__/
|
| 2 |
+
**/*.pyc
|
| 3 |
+
.git
|
| 4 |
+
.gitignore
|
|
|
|
|
|
|
|
|
|
| 5 |
.env
|
| 6 |
+
*.log
|
| 7 |
+
dist
|
| 8 |
+
build
|
.gitignore
DELETED
|
@@ -1,8 +0,0 @@
|
|
| 1 |
-
# Ignore the .streamlit directory and its contents
|
| 2 |
-
Config_files/mysql_credentials.json
|
| 3 |
-
Config_files/secrets.json
|
| 4 |
-
Config_files/snowflake_credentials_Danial.json
|
| 5 |
-
.streamlit/secrets.toml
|
| 6 |
-
|
| 7 |
-
# Ignore the .env file
|
| 8 |
-
.env
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.idea/AI_Message_Generator.iml
CHANGED
|
@@ -1,8 +1,10 @@
|
|
| 1 |
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
<module type="PYTHON_MODULE" version="4">
|
| 3 |
<component name="NewModuleRootManager">
|
| 4 |
-
<content url="file://$MODULE_DIR$"
|
| 5 |
-
|
|
|
|
|
|
|
| 6 |
<orderEntry type="sourceFolder" forTests="false" />
|
| 7 |
</component>
|
| 8 |
</module>
|
|
|
|
| 1 |
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
<module type="PYTHON_MODULE" version="4">
|
| 3 |
<component name="NewModuleRootManager">
|
| 4 |
+
<content url="file://$MODULE_DIR$">
|
| 5 |
+
<excludeFolder url="file://$MODULE_DIR$/venv" />
|
| 6 |
+
</content>
|
| 7 |
+
<orderEntry type="jdk" jdkName="Python 3.9 (musora-machine-learning-messaging-project)" jdkType="Python SDK" />
|
| 8 |
<orderEntry type="sourceFolder" forTests="false" />
|
| 9 |
</component>
|
| 10 |
</module>
|
.idea/misc.xml
CHANGED
|
@@ -3,5 +3,5 @@
|
|
| 3 |
<component name="Black">
|
| 4 |
<option name="sdkName" value="Python 3.9 (AI_Message_Generator)" />
|
| 5 |
</component>
|
| 6 |
-
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9" project-jdk-type="Python SDK" />
|
| 7 |
</project>
|
|
|
|
| 3 |
<component name="Black">
|
| 4 |
<option name="sdkName" value="Python 3.9 (AI_Message_Generator)" />
|
| 5 |
</component>
|
| 6 |
+
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (musora-machine-learning-messaging-project)" project-jdk-type="Python SDK" />
|
| 7 |
</project>
|
.streamlit/config.toml
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
[server]
|
| 2 |
-
enableXsrfProtection = false
|
| 3 |
-
enableCORS = false
|
|
|
|
|
|
|
|
|
|
|
|
CIO/CIO_integration_Python.py
DELETED
|
@@ -1,146 +0,0 @@
|
|
| 1 |
-
import http.client
|
| 2 |
-
import json
|
| 3 |
-
import pandas as pd
|
| 4 |
-
import logging
|
| 5 |
-
import base64
|
| 6 |
-
import requests
|
| 7 |
-
from customerio import CustomerIO, Regions
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
class CustomerIOIntegration:
|
| 11 |
-
def __init__(self, site_id, api_key):
|
| 12 |
-
|
| 13 |
-
self.cio = CustomerIO(site_id=site_id, api_key=api_key)
|
| 14 |
-
logging.basicConfig(level=logging.INFO)
|
| 15 |
-
|
| 16 |
-
# Authentication
|
| 17 |
-
self.site_id = site_id
|
| 18 |
-
self.api_key = api_key
|
| 19 |
-
# Base URL for Customer.io App API endpoints (used for segments management)
|
| 20 |
-
self.base_url = "https://api.customer.io/v1"
|
| 21 |
-
|
| 22 |
-
# Create Basic Auth header
|
| 23 |
-
auth_b64 = base64.b64encode(f"{self.site_id}:{self.api_key}".encode('utf-8')).decode('utf-8')
|
| 24 |
-
self.headers = {
|
| 25 |
-
"Authorization": f"Basic {auth_b64}",
|
| 26 |
-
"Content-Type": "application/json"
|
| 27 |
-
}
|
| 28 |
-
|
| 29 |
-
def add_attributes(self, dataframe):
|
| 30 |
-
|
| 31 |
-
# Filter out rows without messages or cio_id
|
| 32 |
-
filtered_df = dataframe.dropna(subset=['ai_generated_message', 'email'])
|
| 33 |
-
|
| 34 |
-
for index, row in filtered_df.iterrows():
|
| 35 |
-
try:
|
| 36 |
-
self.cio.identify(id=row['email'], ai_generated_message=row['ai_generated_message'])
|
| 37 |
-
logging.info(f"Successfully updated user {row['email']} with message")
|
| 38 |
-
except Exception as e:
|
| 39 |
-
logging.error(f"Failed to update user {row['email']}: {e}")
|
| 40 |
-
|
| 41 |
-
def get_segment(self, segment_name):
|
| 42 |
-
|
| 43 |
-
# Step 1: Check if the segment exists
|
| 44 |
-
resp = requests.get(f"{self.base_url}/segments", headers=self.headers)
|
| 45 |
-
if resp.status_code != 200:
|
| 46 |
-
raise Exception(f"Error fetching segments: {resp.text}")
|
| 47 |
-
|
| 48 |
-
segments = resp.json() # assuming a list of segments is returned
|
| 49 |
-
segment_id = None
|
| 50 |
-
for seg in segments:
|
| 51 |
-
if seg.get("name") == segment_name:
|
| 52 |
-
segment_id = seg.get("id")
|
| 53 |
-
break
|
| 54 |
-
return segment_id
|
| 55 |
-
|
| 56 |
-
def update_segment_from_dataframe(self, df: pd.DataFrame,
|
| 57 |
-
segment_name: str,
|
| 58 |
-
segment_description: str) -> str:
|
| 59 |
-
"""
|
| 60 |
-
Given a pandas DataFrame, create (if needed) and update a Customer.io manual segment.
|
| 61 |
-
|
| 62 |
-
The DataFrame must contain an "email" column (used as the unique identifier) plus other columns
|
| 63 |
-
that become customer attributes.
|
| 64 |
-
|
| 65 |
-
Parameters:
|
| 66 |
-
df: DataFrame containing customer data.
|
| 67 |
-
segment_name: The name of the segment to create or update.
|
| 68 |
-
segment_description: A description for the segment (used when creating it).
|
| 69 |
-
|
| 70 |
-
Returns:
|
| 71 |
-
The segment ID (as returned by the API).
|
| 72 |
-
"""
|
| 73 |
-
segment_id = self.get_segment(segment_name)
|
| 74 |
-
|
| 75 |
-
# If segment does not exist, create it
|
| 76 |
-
if segment_id is None:
|
| 77 |
-
payload = {
|
| 78 |
-
"name": segment_name,
|
| 79 |
-
"description": segment_description,
|
| 80 |
-
"type": "manual" # manual segments require that you add customers explicitly
|
| 81 |
-
}
|
| 82 |
-
resp = requests.post(f"{self.base_url}/segments", headers=self.headers, data=json.dumps(payload))
|
| 83 |
-
if resp.status_code not in (200, 201):
|
| 84 |
-
raise Exception(f"Error creating segment: {resp.text}")
|
| 85 |
-
segment = resp.json()
|
| 86 |
-
segment_id = segment.get("id")
|
| 87 |
-
print(f"Segment '{segment_name}' created with ID: {segment_id}")
|
| 88 |
-
else:
|
| 89 |
-
print(f"Segment '{segment_name}' already exists with ID: {segment_id}")
|
| 90 |
-
|
| 91 |
-
# Step 2: For each row in the DataFrame, update the customer profile.
|
| 92 |
-
# We use the "email" column as the id.
|
| 93 |
-
for index, row in df.iterrows():
|
| 94 |
-
email = row["email"]
|
| 95 |
-
# Prepare a dictionary of attributes (all columns except email)
|
| 96 |
-
attrs = row.drop("email").to_dict()
|
| 97 |
-
# Use the customer.io client to create or update the profile.
|
| 98 |
-
# Note: any keyword argument you pass becomes a custom attribute.
|
| 99 |
-
self.cio.identify(id=email, **attrs)
|
| 100 |
-
|
| 101 |
-
# Step 3: Add all customers (emails) from the DataFrame to the segment.
|
| 102 |
-
customer_ids = df["email"].tolist()
|
| 103 |
-
payload = {
|
| 104 |
-
"ids": customer_ids,
|
| 105 |
-
"id_type": "email" # since we use emails as the identifier
|
| 106 |
-
}
|
| 107 |
-
resp = requests.put(f"{self.base_url}/segments/{segment_id}", headers=self.headers, data=json.dumps(payload))
|
| 108 |
-
if resp.status_code != 200:
|
| 109 |
-
raise Exception(f"Error adding customers to segment: {resp.text}")
|
| 110 |
-
|
| 111 |
-
print(f"Successfully updated segment '{segment_name}' with {len(customer_ids)} customers.")
|
| 112 |
-
return segment_id
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
def load_config_(file_path):
|
| 116 |
-
"""
|
| 117 |
-
Loads configuration JSON files from the local space. (mostly for loading the Snowflake connection parameters)
|
| 118 |
-
:param file_path: local path to the JSON file
|
| 119 |
-
:return: JSON file
|
| 120 |
-
"""
|
| 121 |
-
with open(file_path, 'r') as file:
|
| 122 |
-
return json.load(file)
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
# Example usage
|
| 126 |
-
if __name__ == "__main__":
|
| 127 |
-
data = pd.DataFrame({'email': ['[email protected]'],
|
| 128 |
-
'message': ['This is the second test message'],
|
| 129 |
-
'json_att': [{"message": "test", "url": "test"}]})
|
| 130 |
-
df = pd.DataFrame(data)
|
| 131 |
-
|
| 132 |
-
secrets_file = 'Config_files/secrets.json'
|
| 133 |
-
secrets = load_config_(secrets_file)
|
| 134 |
-
|
| 135 |
-
track_api_key = secrets["MUSORA_CUSTOMER_IO_TRACK_API_KEY"]
|
| 136 |
-
site_id = secrets["MUSORA_CUSTOMER_IO_SITE_ID"]
|
| 137 |
-
api_key = secrets["MUSORA_CUSTOMER_IO_APP_API_KEY"]
|
| 138 |
-
workspace_id = secrets["MUSORA_CUSTOMER_IO_WORKSPACE_ID"]
|
| 139 |
-
|
| 140 |
-
cio_integration = CustomerIOIntegration(api_key=track_api_key, site_id=site_id)
|
| 141 |
-
|
| 142 |
-
# Update (or create) the segment
|
| 143 |
-
segment_id = cio_integration.update_segment_from_dataframe(df,
|
| 144 |
-
segment_name="Danial_ Manual Segment _ AI",
|
| 145 |
-
segment_description="Customers imported from DataFrame")
|
| 146 |
-
print(f"Segment ID: {segment_id}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Config_files/message_system_config.json
DELETED
|
@@ -1,35 +0,0 @@
|
|
| 1 |
-
{
|
| 2 |
-
"user_info_features": [
|
| 3 |
-
"first_name",
|
| 4 |
-
"country",
|
| 5 |
-
"instrument",
|
| 6 |
-
"biography",
|
| 7 |
-
"birthday_reminder",
|
| 8 |
-
"topics",
|
| 9 |
-
"genres",
|
| 10 |
-
"last_completed_content"
|
| 11 |
-
],
|
| 12 |
-
"interaction_features": ["last_content_info"],
|
| 13 |
-
"check_feasibility": [
|
| 14 |
-
"first_name",
|
| 15 |
-
"biography",
|
| 16 |
-
"birthday",
|
| 17 |
-
"topics",
|
| 18 |
-
"genres"
|
| 19 |
-
],
|
| 20 |
-
"AI_Jargon": ["elevate", "enhance", "ignite", "reignite", "rekindle", "rediscover","passion", "boost", "fuel", "thrill", "revive", "spark", "performing", "fresh", "tone", "enthusiasm", "illuminate"],
|
| 21 |
-
"singeo_banned_phrases": " Banned phrases:\n Voice is NOT an instrument, so avoid phrases like below:\n - Your voice is waiting\n - Your voice awaits\n - Your voice needs you\n - Your voice is calling\n - Your voice deserves more\n - Hit the high notes / Hit those notes\n - ...\n",
|
| 22 |
-
"AI_phrases_singeo": ["your voice deserves more"],
|
| 23 |
-
"header_limit": 30,
|
| 24 |
-
"message_limit": 135,
|
| 25 |
-
"LLM_models": ["gpt-4o-mini", "gpt-5-mini", "gpt-5-nano", "gemini-2.5-flash", "gemini-2.0-flash","claude-3-5-haiku-latest", "google/gemma-3-27b-instruct/bf-16"],
|
| 26 |
-
"openai_models": ["gpt-4o-mini", "gpt-4o", "gpt-4.1-nano", "gpt-3.5-turbo", "gpt-4.1-mini", "gpt-5-mini", "gpt-5-nano"],
|
| 27 |
-
"reasoning": ["o1", "o4-mini", "o1-mini", "o3-mini", "gpt-5-mini", "gpt-5-nano"],
|
| 28 |
-
"ollama_models": ["deepseek-r1:1.5b", "gemma3:4b", "deepseek-r1:7b", "gemma3:4b"],
|
| 29 |
-
"claude_models": ["claude-3-5-haiku-latest"],
|
| 30 |
-
"inference_models": ["google/gemma-3-27b-instruct/bf-16", "meta-llama/llama-3.2-11b-instruct/fp-16"],
|
| 31 |
-
"google_models": ["gemini-2.5-flash-lite", "gemini-2.5-flash", "gemini-2.0-flash"]
|
| 32 |
-
}
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Config_files/singeo_phrases.txt
DELETED
|
@@ -1,25 +0,0 @@
|
|
| 1 |
-
Let’s get our vocal cords warmed up.
|
| 2 |
-
Time to dive back into vocal practice.
|
| 3 |
-
Let’s work on finding your true voice.
|
| 4 |
-
Just relax, breathe, and sing.
|
| 5 |
-
It’s time to practice our vocal exercises.
|
| 6 |
-
Let’s sing something together.
|
| 7 |
-
Let’s get those voices sounding fantastic.
|
| 8 |
-
We’ll start with some easy warm-ups to get comfortable.
|
| 9 |
-
Don’t worry about sounding perfect – just relax and enjoy the feeling of singing.
|
| 10 |
-
Let’s unlock the power of your voice.
|
| 11 |
-
Get comfortable and confident with your own voice.
|
| 12 |
-
Before long you’ll be singing confidently.
|
| 13 |
-
It’s time to begin developing your own unique voice.
|
| 14 |
-
At the end of this practice you will start to feel the difference in your voice.
|
| 15 |
-
With a little practice, you will be confidently singing the songs you love.
|
| 16 |
-
The voice is a muscle, and it’s important to train it properly.
|
| 17 |
-
With good habits and practice, singing becomes more effortless.
|
| 18 |
-
It’s time to achieve vocal freedom.
|
| 19 |
-
Get comfortable and confident with your own voice.
|
| 20 |
-
If you sing every day, you’ll start to notice improvements very soon.
|
| 21 |
-
Get nice and loose for this vocal warm-up.
|
| 22 |
-
A good practice habit is the best way to quickly get better at singing.
|
| 23 |
-
Start small and build a practice habit from there.
|
| 24 |
-
When you finish this practice, take a minute to celebrate your success.
|
| 25 |
-
Let’s have fun with these vocal warm-up routines.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Data/Singeo_Camp.csv
DELETED
|
@@ -1,2423 +0,0 @@
|
|
| 1 |
-
user_id
|
| 2 |
-
823594
|
| 3 |
-
490901
|
| 4 |
-
515383
|
| 5 |
-
738659
|
| 6 |
-
405746
|
| 7 |
-
826858
|
| 8 |
-
830514
|
| 9 |
-
708823
|
| 10 |
-
700096
|
| 11 |
-
489602
|
| 12 |
-
807335
|
| 13 |
-
846269
|
| 14 |
-
456315
|
| 15 |
-
800896
|
| 16 |
-
609793
|
| 17 |
-
826307
|
| 18 |
-
613971
|
| 19 |
-
825354
|
| 20 |
-
669861
|
| 21 |
-
523917
|
| 22 |
-
735710
|
| 23 |
-
700202
|
| 24 |
-
840829
|
| 25 |
-
734243
|
| 26 |
-
763676
|
| 27 |
-
643793
|
| 28 |
-
818742
|
| 29 |
-
302590
|
| 30 |
-
499800
|
| 31 |
-
716448
|
| 32 |
-
769525
|
| 33 |
-
847737
|
| 34 |
-
479101
|
| 35 |
-
540891
|
| 36 |
-
446726
|
| 37 |
-
363055
|
| 38 |
-
743017
|
| 39 |
-
848335
|
| 40 |
-
705473
|
| 41 |
-
701070
|
| 42 |
-
495042
|
| 43 |
-
656355
|
| 44 |
-
779786
|
| 45 |
-
676878
|
| 46 |
-
794869
|
| 47 |
-
844044
|
| 48 |
-
827970
|
| 49 |
-
430625
|
| 50 |
-
739409
|
| 51 |
-
810790
|
| 52 |
-
775511
|
| 53 |
-
811495
|
| 54 |
-
808519
|
| 55 |
-
532378
|
| 56 |
-
608445
|
| 57 |
-
701667
|
| 58 |
-
789421
|
| 59 |
-
163690
|
| 60 |
-
678395
|
| 61 |
-
404721
|
| 62 |
-
746421
|
| 63 |
-
768924
|
| 64 |
-
721958
|
| 65 |
-
817271
|
| 66 |
-
836460
|
| 67 |
-
699316
|
| 68 |
-
510608
|
| 69 |
-
453523
|
| 70 |
-
833655
|
| 71 |
-
781672
|
| 72 |
-
314754
|
| 73 |
-
849303
|
| 74 |
-
810720
|
| 75 |
-
494223
|
| 76 |
-
847456
|
| 77 |
-
839757
|
| 78 |
-
693488
|
| 79 |
-
836162
|
| 80 |
-
805625
|
| 81 |
-
476787
|
| 82 |
-
844524
|
| 83 |
-
747934
|
| 84 |
-
838128
|
| 85 |
-
734830
|
| 86 |
-
807586
|
| 87 |
-
851405
|
| 88 |
-
149912
|
| 89 |
-
842532
|
| 90 |
-
559012
|
| 91 |
-
164357
|
| 92 |
-
815713
|
| 93 |
-
810869
|
| 94 |
-
752975
|
| 95 |
-
733281
|
| 96 |
-
828436
|
| 97 |
-
513557
|
| 98 |
-
797340
|
| 99 |
-
642719
|
| 100 |
-
642869
|
| 101 |
-
669591
|
| 102 |
-
813698
|
| 103 |
-
777254
|
| 104 |
-
398191
|
| 105 |
-
616835
|
| 106 |
-
590393
|
| 107 |
-
555188
|
| 108 |
-
623332
|
| 109 |
-
837023
|
| 110 |
-
777846
|
| 111 |
-
846779
|
| 112 |
-
510346
|
| 113 |
-
408599
|
| 114 |
-
495594
|
| 115 |
-
607317
|
| 116 |
-
773438
|
| 117 |
-
788691
|
| 118 |
-
701334
|
| 119 |
-
415906
|
| 120 |
-
661126
|
| 121 |
-
483253
|
| 122 |
-
411767
|
| 123 |
-
844693
|
| 124 |
-
843344
|
| 125 |
-
773175
|
| 126 |
-
802679
|
| 127 |
-
772799
|
| 128 |
-
727070
|
| 129 |
-
747532
|
| 130 |
-
828448
|
| 131 |
-
727763
|
| 132 |
-
847610
|
| 133 |
-
771985
|
| 134 |
-
773524
|
| 135 |
-
165152
|
| 136 |
-
609872
|
| 137 |
-
776660
|
| 138 |
-
682607
|
| 139 |
-
780445
|
| 140 |
-
573793
|
| 141 |
-
153159
|
| 142 |
-
626408
|
| 143 |
-
411564
|
| 144 |
-
708388
|
| 145 |
-
836703
|
| 146 |
-
745343
|
| 147 |
-
726006
|
| 148 |
-
771271
|
| 149 |
-
848091
|
| 150 |
-
641630
|
| 151 |
-
832240
|
| 152 |
-
735901
|
| 153 |
-
559381
|
| 154 |
-
780435
|
| 155 |
-
846720
|
| 156 |
-
493231
|
| 157 |
-
764017
|
| 158 |
-
730459
|
| 159 |
-
786647
|
| 160 |
-
803793
|
| 161 |
-
408523
|
| 162 |
-
784226
|
| 163 |
-
697162
|
| 164 |
-
836837
|
| 165 |
-
824994
|
| 166 |
-
750670
|
| 167 |
-
448544
|
| 168 |
-
829329
|
| 169 |
-
790279
|
| 170 |
-
722657
|
| 171 |
-
612059
|
| 172 |
-
826567
|
| 173 |
-
757847
|
| 174 |
-
603183
|
| 175 |
-
506591
|
| 176 |
-
846162
|
| 177 |
-
690777
|
| 178 |
-
833627
|
| 179 |
-
844260
|
| 180 |
-
588385
|
| 181 |
-
733786
|
| 182 |
-
717260
|
| 183 |
-
812919
|
| 184 |
-
503894
|
| 185 |
-
630100
|
| 186 |
-
463325
|
| 187 |
-
737462
|
| 188 |
-
668319
|
| 189 |
-
795273
|
| 190 |
-
843246
|
| 191 |
-
849499
|
| 192 |
-
581648
|
| 193 |
-
155212
|
| 194 |
-
840153
|
| 195 |
-
791259
|
| 196 |
-
749093
|
| 197 |
-
751829
|
| 198 |
-
840130
|
| 199 |
-
591118
|
| 200 |
-
437140
|
| 201 |
-
389366
|
| 202 |
-
824226
|
| 203 |
-
683245
|
| 204 |
-
570665
|
| 205 |
-
509855
|
| 206 |
-
775494
|
| 207 |
-
431499
|
| 208 |
-
430336
|
| 209 |
-
362939
|
| 210 |
-
426802
|
| 211 |
-
773757
|
| 212 |
-
804626
|
| 213 |
-
439681
|
| 214 |
-
409011
|
| 215 |
-
152842
|
| 216 |
-
682166
|
| 217 |
-
320112
|
| 218 |
-
582871
|
| 219 |
-
336584
|
| 220 |
-
771283
|
| 221 |
-
402069
|
| 222 |
-
527950
|
| 223 |
-
424730
|
| 224 |
-
638352
|
| 225 |
-
155728
|
| 226 |
-
617756
|
| 227 |
-
346700
|
| 228 |
-
483219
|
| 229 |
-
607205
|
| 230 |
-
732002
|
| 231 |
-
477743
|
| 232 |
-
691258
|
| 233 |
-
434743
|
| 234 |
-
529165
|
| 235 |
-
330043
|
| 236 |
-
622033
|
| 237 |
-
529290
|
| 238 |
-
160698
|
| 239 |
-
498601
|
| 240 |
-
151586
|
| 241 |
-
825306
|
| 242 |
-
522815
|
| 243 |
-
266543
|
| 244 |
-
527590
|
| 245 |
-
772152
|
| 246 |
-
475201
|
| 247 |
-
316516
|
| 248 |
-
474763
|
| 249 |
-
478124
|
| 250 |
-
646712
|
| 251 |
-
504354
|
| 252 |
-
776480
|
| 253 |
-
577798
|
| 254 |
-
219993
|
| 255 |
-
689888
|
| 256 |
-
801362
|
| 257 |
-
489651
|
| 258 |
-
428722
|
| 259 |
-
538821
|
| 260 |
-
837192
|
| 261 |
-
773565
|
| 262 |
-
365543
|
| 263 |
-
518644
|
| 264 |
-
502264
|
| 265 |
-
168502
|
| 266 |
-
173544
|
| 267 |
-
793873
|
| 268 |
-
342607
|
| 269 |
-
849725
|
| 270 |
-
792925
|
| 271 |
-
681845
|
| 272 |
-
348059
|
| 273 |
-
769500
|
| 274 |
-
573430
|
| 275 |
-
774603
|
| 276 |
-
432365
|
| 277 |
-
769633
|
| 278 |
-
435278
|
| 279 |
-
506422
|
| 280 |
-
496848
|
| 281 |
-
637282
|
| 282 |
-
391164
|
| 283 |
-
521431
|
| 284 |
-
504766
|
| 285 |
-
748512
|
| 286 |
-
843534
|
| 287 |
-
417465
|
| 288 |
-
839155
|
| 289 |
-
496403
|
| 290 |
-
775852
|
| 291 |
-
644957
|
| 292 |
-
402315
|
| 293 |
-
404292
|
| 294 |
-
635168
|
| 295 |
-
680425
|
| 296 |
-
547984
|
| 297 |
-
455873
|
| 298 |
-
688802
|
| 299 |
-
651571
|
| 300 |
-
179345
|
| 301 |
-
833093
|
| 302 |
-
224454
|
| 303 |
-
340633
|
| 304 |
-
558447
|
| 305 |
-
638432
|
| 306 |
-
834048
|
| 307 |
-
846206
|
| 308 |
-
635364
|
| 309 |
-
726260
|
| 310 |
-
815259
|
| 311 |
-
826627
|
| 312 |
-
848105
|
| 313 |
-
830959
|
| 314 |
-
685538
|
| 315 |
-
758889
|
| 316 |
-
668269
|
| 317 |
-
739666
|
| 318 |
-
506010
|
| 319 |
-
815330
|
| 320 |
-
846254
|
| 321 |
-
553275
|
| 322 |
-
454499
|
| 323 |
-
265835
|
| 324 |
-
772933
|
| 325 |
-
547424
|
| 326 |
-
772562
|
| 327 |
-
793888
|
| 328 |
-
619788
|
| 329 |
-
506322
|
| 330 |
-
638817
|
| 331 |
-
760751
|
| 332 |
-
609402
|
| 333 |
-
490814
|
| 334 |
-
761786
|
| 335 |
-
845620
|
| 336 |
-
609916
|
| 337 |
-
671055
|
| 338 |
-
827730
|
| 339 |
-
729361
|
| 340 |
-
711958
|
| 341 |
-
573164
|
| 342 |
-
504473
|
| 343 |
-
840408
|
| 344 |
-
604810
|
| 345 |
-
760760
|
| 346 |
-
156674
|
| 347 |
-
528863
|
| 348 |
-
620355
|
| 349 |
-
409083
|
| 350 |
-
796885
|
| 351 |
-
762290
|
| 352 |
-
764323
|
| 353 |
-
814876
|
| 354 |
-
833921
|
| 355 |
-
587492
|
| 356 |
-
764701
|
| 357 |
-
776556
|
| 358 |
-
771445
|
| 359 |
-
673717
|
| 360 |
-
461055
|
| 361 |
-
610853
|
| 362 |
-
840835
|
| 363 |
-
149853
|
| 364 |
-
606064
|
| 365 |
-
628062
|
| 366 |
-
394595
|
| 367 |
-
407917
|
| 368 |
-
802720
|
| 369 |
-
777875
|
| 370 |
-
558869
|
| 371 |
-
753886
|
| 372 |
-
738757
|
| 373 |
-
502669
|
| 374 |
-
528920
|
| 375 |
-
814865
|
| 376 |
-
619115
|
| 377 |
-
687962
|
| 378 |
-
809784
|
| 379 |
-
770461
|
| 380 |
-
600711
|
| 381 |
-
847598
|
| 382 |
-
572181
|
| 383 |
-
681817
|
| 384 |
-
551223
|
| 385 |
-
515764
|
| 386 |
-
724633
|
| 387 |
-
277382
|
| 388 |
-
638779
|
| 389 |
-
847595
|
| 390 |
-
775659
|
| 391 |
-
785785
|
| 392 |
-
428762
|
| 393 |
-
715942
|
| 394 |
-
591901
|
| 395 |
-
366729
|
| 396 |
-
739424
|
| 397 |
-
784349
|
| 398 |
-
535431
|
| 399 |
-
731078
|
| 400 |
-
830298
|
| 401 |
-
542037
|
| 402 |
-
664458
|
| 403 |
-
769695
|
| 404 |
-
704898
|
| 405 |
-
846071
|
| 406 |
-
398730
|
| 407 |
-
360732
|
| 408 |
-
460593
|
| 409 |
-
777080
|
| 410 |
-
492467
|
| 411 |
-
634693
|
| 412 |
-
415064
|
| 413 |
-
360230
|
| 414 |
-
657762
|
| 415 |
-
850521
|
| 416 |
-
572438
|
| 417 |
-
400059
|
| 418 |
-
746719
|
| 419 |
-
426122
|
| 420 |
-
848017
|
| 421 |
-
678376
|
| 422 |
-
841610
|
| 423 |
-
737346
|
| 424 |
-
739712
|
| 425 |
-
839233
|
| 426 |
-
509636
|
| 427 |
-
536485
|
| 428 |
-
760548
|
| 429 |
-
819858
|
| 430 |
-
397254
|
| 431 |
-
492387
|
| 432 |
-
526490
|
| 433 |
-
625800
|
| 434 |
-
637910
|
| 435 |
-
792458
|
| 436 |
-
820801
|
| 437 |
-
287072
|
| 438 |
-
670546
|
| 439 |
-
277307
|
| 440 |
-
776543
|
| 441 |
-
583218
|
| 442 |
-
553139
|
| 443 |
-
849611
|
| 444 |
-
621132
|
| 445 |
-
339605
|
| 446 |
-
825339
|
| 447 |
-
840561
|
| 448 |
-
502668
|
| 449 |
-
594181
|
| 450 |
-
775449
|
| 451 |
-
843680
|
| 452 |
-
534695
|
| 453 |
-
157386
|
| 454 |
-
610424
|
| 455 |
-
817223
|
| 456 |
-
545897
|
| 457 |
-
451469
|
| 458 |
-
572567
|
| 459 |
-
717350
|
| 460 |
-
790818
|
| 461 |
-
761108
|
| 462 |
-
775873
|
| 463 |
-
778351
|
| 464 |
-
754644
|
| 465 |
-
681999
|
| 466 |
-
799473
|
| 467 |
-
729857
|
| 468 |
-
793350
|
| 469 |
-
808406
|
| 470 |
-
507571
|
| 471 |
-
774482
|
| 472 |
-
563356
|
| 473 |
-
728631
|
| 474 |
-
581828
|
| 475 |
-
835184
|
| 476 |
-
680499
|
| 477 |
-
621659
|
| 478 |
-
748517
|
| 479 |
-
347059
|
| 480 |
-
782526
|
| 481 |
-
737999
|
| 482 |
-
701912
|
| 483 |
-
809973
|
| 484 |
-
350152
|
| 485 |
-
453628
|
| 486 |
-
700524
|
| 487 |
-
394419
|
| 488 |
-
809594
|
| 489 |
-
809863
|
| 490 |
-
353735
|
| 491 |
-
808645
|
| 492 |
-
746506
|
| 493 |
-
826657
|
| 494 |
-
579376
|
| 495 |
-
452590
|
| 496 |
-
453846
|
| 497 |
-
733110
|
| 498 |
-
448444
|
| 499 |
-
795397
|
| 500 |
-
163687
|
| 501 |
-
576950
|
| 502 |
-
773859
|
| 503 |
-
308899
|
| 504 |
-
637266
|
| 505 |
-
760925
|
| 506 |
-
497688
|
| 507 |
-
585293
|
| 508 |
-
845012
|
| 509 |
-
659328
|
| 510 |
-
568397
|
| 511 |
-
450808
|
| 512 |
-
670046
|
| 513 |
-
694740
|
| 514 |
-
363928
|
| 515 |
-
615096
|
| 516 |
-
772991
|
| 517 |
-
396929
|
| 518 |
-
397067
|
| 519 |
-
588312
|
| 520 |
-
150378
|
| 521 |
-
474580
|
| 522 |
-
433165
|
| 523 |
-
457270
|
| 524 |
-
394429
|
| 525 |
-
544240
|
| 526 |
-
312241
|
| 527 |
-
337048
|
| 528 |
-
472068
|
| 529 |
-
775102
|
| 530 |
-
663723
|
| 531 |
-
357332
|
| 532 |
-
724027
|
| 533 |
-
694874
|
| 534 |
-
767677
|
| 535 |
-
737172
|
| 536 |
-
518516
|
| 537 |
-
536744
|
| 538 |
-
582622
|
| 539 |
-
244298
|
| 540 |
-
581361
|
| 541 |
-
543312
|
| 542 |
-
387110
|
| 543 |
-
529344
|
| 544 |
-
788314
|
| 545 |
-
543701
|
| 546 |
-
506526
|
| 547 |
-
483897
|
| 548 |
-
310676
|
| 549 |
-
641239
|
| 550 |
-
679283
|
| 551 |
-
762154
|
| 552 |
-
694879
|
| 553 |
-
569673
|
| 554 |
-
168868
|
| 555 |
-
444835
|
| 556 |
-
182975
|
| 557 |
-
509972
|
| 558 |
-
534055
|
| 559 |
-
549621
|
| 560 |
-
835864
|
| 561 |
-
734032
|
| 562 |
-
387802
|
| 563 |
-
341561
|
| 564 |
-
751726
|
| 565 |
-
775924
|
| 566 |
-
587812
|
| 567 |
-
582850
|
| 568 |
-
829059
|
| 569 |
-
779006
|
| 570 |
-
303410
|
| 571 |
-
795790
|
| 572 |
-
327260
|
| 573 |
-
477325
|
| 574 |
-
510625
|
| 575 |
-
452891
|
| 576 |
-
425579
|
| 577 |
-
830117
|
| 578 |
-
843459
|
| 579 |
-
767912
|
| 580 |
-
744531
|
| 581 |
-
534853
|
| 582 |
-
331308
|
| 583 |
-
568474
|
| 584 |
-
849322
|
| 585 |
-
610974
|
| 586 |
-
783468
|
| 587 |
-
166174
|
| 588 |
-
149681
|
| 589 |
-
641651
|
| 590 |
-
398207
|
| 591 |
-
472902
|
| 592 |
-
664543
|
| 593 |
-
700783
|
| 594 |
-
407866
|
| 595 |
-
685933
|
| 596 |
-
850098
|
| 597 |
-
619377
|
| 598 |
-
522953
|
| 599 |
-
742673
|
| 600 |
-
496309
|
| 601 |
-
777520
|
| 602 |
-
721273
|
| 603 |
-
527102
|
| 604 |
-
566265
|
| 605 |
-
807588
|
| 606 |
-
830943
|
| 607 |
-
162449
|
| 608 |
-
821730
|
| 609 |
-
628142
|
| 610 |
-
708433
|
| 611 |
-
776546
|
| 612 |
-
676950
|
| 613 |
-
734593
|
| 614 |
-
444803
|
| 615 |
-
295680
|
| 616 |
-
436760
|
| 617 |
-
173080
|
| 618 |
-
831699
|
| 619 |
-
776055
|
| 620 |
-
819906
|
| 621 |
-
834334
|
| 622 |
-
717288
|
| 623 |
-
149770
|
| 624 |
-
154282
|
| 625 |
-
753709
|
| 626 |
-
815416
|
| 627 |
-
520570
|
| 628 |
-
834940
|
| 629 |
-
350516
|
| 630 |
-
832773
|
| 631 |
-
777094
|
| 632 |
-
847805
|
| 633 |
-
307461
|
| 634 |
-
728520
|
| 635 |
-
740103
|
| 636 |
-
711989
|
| 637 |
-
800647
|
| 638 |
-
520849
|
| 639 |
-
795953
|
| 640 |
-
683950
|
| 641 |
-
733885
|
| 642 |
-
426856
|
| 643 |
-
622050
|
| 644 |
-
849340
|
| 645 |
-
602125
|
| 646 |
-
632679
|
| 647 |
-
794347
|
| 648 |
-
804676
|
| 649 |
-
725603
|
| 650 |
-
818957
|
| 651 |
-
833010
|
| 652 |
-
623557
|
| 653 |
-
769905
|
| 654 |
-
594243
|
| 655 |
-
438299
|
| 656 |
-
833736
|
| 657 |
-
691429
|
| 658 |
-
836559
|
| 659 |
-
691015
|
| 660 |
-
840558
|
| 661 |
-
506540
|
| 662 |
-
617941
|
| 663 |
-
584575
|
| 664 |
-
754369
|
| 665 |
-
534601
|
| 666 |
-
846088
|
| 667 |
-
528696
|
| 668 |
-
761024
|
| 669 |
-
330153
|
| 670 |
-
307555
|
| 671 |
-
396562
|
| 672 |
-
750888
|
| 673 |
-
508567
|
| 674 |
-
151802
|
| 675 |
-
734070
|
| 676 |
-
767156
|
| 677 |
-
836241
|
| 678 |
-
524750
|
| 679 |
-
828197
|
| 680 |
-
792594
|
| 681 |
-
796263
|
| 682 |
-
820525
|
| 683 |
-
817186
|
| 684 |
-
848414
|
| 685 |
-
791055
|
| 686 |
-
594299
|
| 687 |
-
555274
|
| 688 |
-
781784
|
| 689 |
-
317310
|
| 690 |
-
453867
|
| 691 |
-
672955
|
| 692 |
-
727739
|
| 693 |
-
831862
|
| 694 |
-
726247
|
| 695 |
-
832588
|
| 696 |
-
847392
|
| 697 |
-
599048
|
| 698 |
-
829889
|
| 699 |
-
565766
|
| 700 |
-
500667
|
| 701 |
-
745657
|
| 702 |
-
833682
|
| 703 |
-
849229
|
| 704 |
-
756664
|
| 705 |
-
839347
|
| 706 |
-
489625
|
| 707 |
-
728982
|
| 708 |
-
738367
|
| 709 |
-
533542
|
| 710 |
-
502479
|
| 711 |
-
838379
|
| 712 |
-
484562
|
| 713 |
-
776600
|
| 714 |
-
654420
|
| 715 |
-
822835
|
| 716 |
-
819608
|
| 717 |
-
698109
|
| 718 |
-
764174
|
| 719 |
-
770429
|
| 720 |
-
816694
|
| 721 |
-
709270
|
| 722 |
-
642279
|
| 723 |
-
836356
|
| 724 |
-
846010
|
| 725 |
-
642369
|
| 726 |
-
499681
|
| 727 |
-
488377
|
| 728 |
-
631284
|
| 729 |
-
842806
|
| 730 |
-
811504
|
| 731 |
-
844909
|
| 732 |
-
699904
|
| 733 |
-
808604
|
| 734 |
-
845179
|
| 735 |
-
768156
|
| 736 |
-
822402
|
| 737 |
-
839600
|
| 738 |
-
592647
|
| 739 |
-
847946
|
| 740 |
-
621974
|
| 741 |
-
510991
|
| 742 |
-
690307
|
| 743 |
-
652407
|
| 744 |
-
602254
|
| 745 |
-
812810
|
| 746 |
-
825611
|
| 747 |
-
617476
|
| 748 |
-
549193
|
| 749 |
-
520057
|
| 750 |
-
364669
|
| 751 |
-
680674
|
| 752 |
-
822284
|
| 753 |
-
697333
|
| 754 |
-
807871
|
| 755 |
-
708806
|
| 756 |
-
807522
|
| 757 |
-
822326
|
| 758 |
-
546417
|
| 759 |
-
459922
|
| 760 |
-
439475
|
| 761 |
-
833903
|
| 762 |
-
749240
|
| 763 |
-
809839
|
| 764 |
-
566165
|
| 765 |
-
802092
|
| 766 |
-
520330
|
| 767 |
-
459279
|
| 768 |
-
494887
|
| 769 |
-
733006
|
| 770 |
-
739312
|
| 771 |
-
545163
|
| 772 |
-
446670
|
| 773 |
-
619851
|
| 774 |
-
820355
|
| 775 |
-
834851
|
| 776 |
-
777699
|
| 777 |
-
603175
|
| 778 |
-
479862
|
| 779 |
-
845962
|
| 780 |
-
776709
|
| 781 |
-
789739
|
| 782 |
-
457957
|
| 783 |
-
541961
|
| 784 |
-
829285
|
| 785 |
-
751659
|
| 786 |
-
626323
|
| 787 |
-
606199
|
| 788 |
-
691067
|
| 789 |
-
494324
|
| 790 |
-
323457
|
| 791 |
-
603476
|
| 792 |
-
744417
|
| 793 |
-
671088
|
| 794 |
-
576961
|
| 795 |
-
358017
|
| 796 |
-
403844
|
| 797 |
-
326754
|
| 798 |
-
776511
|
| 799 |
-
685880
|
| 800 |
-
693446
|
| 801 |
-
771967
|
| 802 |
-
349273
|
| 803 |
-
696862
|
| 804 |
-
852247
|
| 805 |
-
748948
|
| 806 |
-
346764
|
| 807 |
-
625792
|
| 808 |
-
772579
|
| 809 |
-
366953
|
| 810 |
-
449463
|
| 811 |
-
326311
|
| 812 |
-
438014
|
| 813 |
-
346844
|
| 814 |
-
597546
|
| 815 |
-
806144
|
| 816 |
-
813393
|
| 817 |
-
342050
|
| 818 |
-
773709
|
| 819 |
-
321738
|
| 820 |
-
328501
|
| 821 |
-
595429
|
| 822 |
-
773449
|
| 823 |
-
591029
|
| 824 |
-
460155
|
| 825 |
-
641822
|
| 826 |
-
166803
|
| 827 |
-
763327
|
| 828 |
-
845476
|
| 829 |
-
822023
|
| 830 |
-
781884
|
| 831 |
-
741054
|
| 832 |
-
511204
|
| 833 |
-
699718
|
| 834 |
-
793533
|
| 835 |
-
808019
|
| 836 |
-
491001
|
| 837 |
-
792658
|
| 838 |
-
494888
|
| 839 |
-
776029
|
| 840 |
-
521260
|
| 841 |
-
494204
|
| 842 |
-
844062
|
| 843 |
-
356833
|
| 844 |
-
266105
|
| 845 |
-
606727
|
| 846 |
-
345167
|
| 847 |
-
501030
|
| 848 |
-
412994
|
| 849 |
-
404301
|
| 850 |
-
776269
|
| 851 |
-
403394
|
| 852 |
-
808760
|
| 853 |
-
315194
|
| 854 |
-
658960
|
| 855 |
-
505915
|
| 856 |
-
449270
|
| 857 |
-
633334
|
| 858 |
-
542929
|
| 859 |
-
350232
|
| 860 |
-
425562
|
| 861 |
-
360205
|
| 862 |
-
271932
|
| 863 |
-
397433
|
| 864 |
-
364021
|
| 865 |
-
360365
|
| 866 |
-
510908
|
| 867 |
-
307275
|
| 868 |
-
596945
|
| 869 |
-
325866
|
| 870 |
-
777720
|
| 871 |
-
557407
|
| 872 |
-
695251
|
| 873 |
-
524088
|
| 874 |
-
619432
|
| 875 |
-
715088
|
| 876 |
-
539235
|
| 877 |
-
840134
|
| 878 |
-
818341
|
| 879 |
-
777645
|
| 880 |
-
510826
|
| 881 |
-
526989
|
| 882 |
-
151579
|
| 883 |
-
680528
|
| 884 |
-
156563
|
| 885 |
-
666287
|
| 886 |
-
365127
|
| 887 |
-
487458
|
| 888 |
-
829542
|
| 889 |
-
581611
|
| 890 |
-
151327
|
| 891 |
-
534844
|
| 892 |
-
593451
|
| 893 |
-
698638
|
| 894 |
-
776159
|
| 895 |
-
849287
|
| 896 |
-
396453
|
| 897 |
-
701017
|
| 898 |
-
842796
|
| 899 |
-
767432
|
| 900 |
-
496240
|
| 901 |
-
606439
|
| 902 |
-
775731
|
| 903 |
-
330010
|
| 904 |
-
728101
|
| 905 |
-
651101
|
| 906 |
-
392751
|
| 907 |
-
728781
|
| 908 |
-
813848
|
| 909 |
-
548589
|
| 910 |
-
737238
|
| 911 |
-
612591
|
| 912 |
-
754112
|
| 913 |
-
152761
|
| 914 |
-
843964
|
| 915 |
-
851370
|
| 916 |
-
735063
|
| 917 |
-
821770
|
| 918 |
-
556217
|
| 919 |
-
366536
|
| 920 |
-
676958
|
| 921 |
-
154958
|
| 922 |
-
567444
|
| 923 |
-
723202
|
| 924 |
-
736335
|
| 925 |
-
346375
|
| 926 |
-
840101
|
| 927 |
-
489737
|
| 928 |
-
837522
|
| 929 |
-
728778
|
| 930 |
-
830871
|
| 931 |
-
836203
|
| 932 |
-
840705
|
| 933 |
-
809916
|
| 934 |
-
534108
|
| 935 |
-
848917
|
| 936 |
-
422044
|
| 937 |
-
540889
|
| 938 |
-
415273
|
| 939 |
-
485544
|
| 940 |
-
442195
|
| 941 |
-
563450
|
| 942 |
-
771964
|
| 943 |
-
798123
|
| 944 |
-
607302
|
| 945 |
-
363529
|
| 946 |
-
771000
|
| 947 |
-
777862
|
| 948 |
-
797376
|
| 949 |
-
161678
|
| 950 |
-
626817
|
| 951 |
-
793449
|
| 952 |
-
814504
|
| 953 |
-
699891
|
| 954 |
-
731931
|
| 955 |
-
686492
|
| 956 |
-
696739
|
| 957 |
-
843905
|
| 958 |
-
396113
|
| 959 |
-
810423
|
| 960 |
-
717652
|
| 961 |
-
773335
|
| 962 |
-
811026
|
| 963 |
-
367447
|
| 964 |
-
796125
|
| 965 |
-
536785
|
| 966 |
-
694050
|
| 967 |
-
520031
|
| 968 |
-
821315
|
| 969 |
-
710498
|
| 970 |
-
720216
|
| 971 |
-
558567
|
| 972 |
-
836458
|
| 973 |
-
475394
|
| 974 |
-
489700
|
| 975 |
-
763598
|
| 976 |
-
710254
|
| 977 |
-
723656
|
| 978 |
-
408460
|
| 979 |
-
771012
|
| 980 |
-
647613
|
| 981 |
-
758453
|
| 982 |
-
569227
|
| 983 |
-
764580
|
| 984 |
-
744268
|
| 985 |
-
498461
|
| 986 |
-
771363
|
| 987 |
-
739290
|
| 988 |
-
850615
|
| 989 |
-
763206
|
| 990 |
-
411073
|
| 991 |
-
750224
|
| 992 |
-
737147
|
| 993 |
-
402696
|
| 994 |
-
845312
|
| 995 |
-
304639
|
| 996 |
-
844427
|
| 997 |
-
639105
|
| 998 |
-
827832
|
| 999 |
-
731574
|
| 1000 |
-
803975
|
| 1001 |
-
662014
|
| 1002 |
-
574987
|
| 1003 |
-
823860
|
| 1004 |
-
352961
|
| 1005 |
-
794503
|
| 1006 |
-
839994
|
| 1007 |
-
792877
|
| 1008 |
-
806297
|
| 1009 |
-
550251
|
| 1010 |
-
837708
|
| 1011 |
-
790542
|
| 1012 |
-
789096
|
| 1013 |
-
819267
|
| 1014 |
-
697430
|
| 1015 |
-
522691
|
| 1016 |
-
847312
|
| 1017 |
-
422009
|
| 1018 |
-
800656
|
| 1019 |
-
461067
|
| 1020 |
-
780691
|
| 1021 |
-
805126
|
| 1022 |
-
782840
|
| 1023 |
-
336618
|
| 1024 |
-
358903
|
| 1025 |
-
805888
|
| 1026 |
-
847694
|
| 1027 |
-
722474
|
| 1028 |
-
712014
|
| 1029 |
-
513154
|
| 1030 |
-
637999
|
| 1031 |
-
830316
|
| 1032 |
-
701961
|
| 1033 |
-
821374
|
| 1034 |
-
775922
|
| 1035 |
-
827994
|
| 1036 |
-
835198
|
| 1037 |
-
545611
|
| 1038 |
-
825894
|
| 1039 |
-
555626
|
| 1040 |
-
621989
|
| 1041 |
-
778372
|
| 1042 |
-
818225
|
| 1043 |
-
778251
|
| 1044 |
-
154313
|
| 1045 |
-
810427
|
| 1046 |
-
268298
|
| 1047 |
-
839425
|
| 1048 |
-
815456
|
| 1049 |
-
430109
|
| 1050 |
-
497987
|
| 1051 |
-
544180
|
| 1052 |
-
843623
|
| 1053 |
-
719716
|
| 1054 |
-
362472
|
| 1055 |
-
606479
|
| 1056 |
-
835281
|
| 1057 |
-
346314
|
| 1058 |
-
610282
|
| 1059 |
-
841099
|
| 1060 |
-
740082
|
| 1061 |
-
689022
|
| 1062 |
-
460398
|
| 1063 |
-
839446
|
| 1064 |
-
696731
|
| 1065 |
-
830905
|
| 1066 |
-
659046
|
| 1067 |
-
490092
|
| 1068 |
-
846372
|
| 1069 |
-
765558
|
| 1070 |
-
423855
|
| 1071 |
-
840977
|
| 1072 |
-
499785
|
| 1073 |
-
713359
|
| 1074 |
-
816990
|
| 1075 |
-
159141
|
| 1076 |
-
639232
|
| 1077 |
-
736766
|
| 1078 |
-
778314
|
| 1079 |
-
737275
|
| 1080 |
-
809757
|
| 1081 |
-
770279
|
| 1082 |
-
158961
|
| 1083 |
-
840799
|
| 1084 |
-
712005
|
| 1085 |
-
690625
|
| 1086 |
-
697847
|
| 1087 |
-
730307
|
| 1088 |
-
835502
|
| 1089 |
-
551116
|
| 1090 |
-
840491
|
| 1091 |
-
748326
|
| 1092 |
-
563983
|
| 1093 |
-
843631
|
| 1094 |
-
602224
|
| 1095 |
-
416699
|
| 1096 |
-
337501
|
| 1097 |
-
349907
|
| 1098 |
-
445611
|
| 1099 |
-
563571
|
| 1100 |
-
171769
|
| 1101 |
-
583464
|
| 1102 |
-
844083
|
| 1103 |
-
602123
|
| 1104 |
-
483231
|
| 1105 |
-
778269
|
| 1106 |
-
416031
|
| 1107 |
-
524574
|
| 1108 |
-
440594
|
| 1109 |
-
532863
|
| 1110 |
-
330874
|
| 1111 |
-
553756
|
| 1112 |
-
621807
|
| 1113 |
-
642600
|
| 1114 |
-
330218
|
| 1115 |
-
331494
|
| 1116 |
-
812449
|
| 1117 |
-
408664
|
| 1118 |
-
354720
|
| 1119 |
-
603354
|
| 1120 |
-
453801
|
| 1121 |
-
772422
|
| 1122 |
-
474464
|
| 1123 |
-
270702
|
| 1124 |
-
751822
|
| 1125 |
-
462621
|
| 1126 |
-
456320
|
| 1127 |
-
170343
|
| 1128 |
-
393699
|
| 1129 |
-
634607
|
| 1130 |
-
822841
|
| 1131 |
-
832233
|
| 1132 |
-
595526
|
| 1133 |
-
358241
|
| 1134 |
-
708138
|
| 1135 |
-
354438
|
| 1136 |
-
671007
|
| 1137 |
-
537994
|
| 1138 |
-
352344
|
| 1139 |
-
502262
|
| 1140 |
-
536674
|
| 1141 |
-
778036
|
| 1142 |
-
296369
|
| 1143 |
-
692366
|
| 1144 |
-
711647
|
| 1145 |
-
557263
|
| 1146 |
-
638830
|
| 1147 |
-
606347
|
| 1148 |
-
422092
|
| 1149 |
-
532633
|
| 1150 |
-
575155
|
| 1151 |
-
425440
|
| 1152 |
-
294441
|
| 1153 |
-
422825
|
| 1154 |
-
516509
|
| 1155 |
-
581198
|
| 1156 |
-
776259
|
| 1157 |
-
779585
|
| 1158 |
-
617604
|
| 1159 |
-
824109
|
| 1160 |
-
678011
|
| 1161 |
-
561583
|
| 1162 |
-
437991
|
| 1163 |
-
667578
|
| 1164 |
-
774370
|
| 1165 |
-
699248
|
| 1166 |
-
792204
|
| 1167 |
-
152661
|
| 1168 |
-
565665
|
| 1169 |
-
517642
|
| 1170 |
-
670020
|
| 1171 |
-
472852
|
| 1172 |
-
618636
|
| 1173 |
-
479414
|
| 1174 |
-
566493
|
| 1175 |
-
512002
|
| 1176 |
-
329292
|
| 1177 |
-
388820
|
| 1178 |
-
671724
|
| 1179 |
-
617726
|
| 1180 |
-
474736
|
| 1181 |
-
848035
|
| 1182 |
-
773861
|
| 1183 |
-
848198
|
| 1184 |
-
262389
|
| 1185 |
-
515870
|
| 1186 |
-
666521
|
| 1187 |
-
435059
|
| 1188 |
-
160141
|
| 1189 |
-
357156
|
| 1190 |
-
354153
|
| 1191 |
-
577568
|
| 1192 |
-
754455
|
| 1193 |
-
538399
|
| 1194 |
-
773162
|
| 1195 |
-
774964
|
| 1196 |
-
829167
|
| 1197 |
-
352486
|
| 1198 |
-
729390
|
| 1199 |
-
416243
|
| 1200 |
-
210256
|
| 1201 |
-
678118
|
| 1202 |
-
483211
|
| 1203 |
-
463489
|
| 1204 |
-
325276
|
| 1205 |
-
307002
|
| 1206 |
-
774534
|
| 1207 |
-
764333
|
| 1208 |
-
774735
|
| 1209 |
-
831473
|
| 1210 |
-
593532
|
| 1211 |
-
578456
|
| 1212 |
-
457953
|
| 1213 |
-
707019
|
| 1214 |
-
839802
|
| 1215 |
-
630050
|
| 1216 |
-
800415
|
| 1217 |
-
829507
|
| 1218 |
-
680790
|
| 1219 |
-
838764
|
| 1220 |
-
839518
|
| 1221 |
-
720132
|
| 1222 |
-
829687
|
| 1223 |
-
652538
|
| 1224 |
-
457196
|
| 1225 |
-
326422
|
| 1226 |
-
850059
|
| 1227 |
-
775165
|
| 1228 |
-
609235
|
| 1229 |
-
686553
|
| 1230 |
-
616530
|
| 1231 |
-
603411
|
| 1232 |
-
798750
|
| 1233 |
-
727465
|
| 1234 |
-
412806
|
| 1235 |
-
791309
|
| 1236 |
-
795437
|
| 1237 |
-
351855
|
| 1238 |
-
460218
|
| 1239 |
-
758040
|
| 1240 |
-
317838
|
| 1241 |
-
718434
|
| 1242 |
-
608517
|
| 1243 |
-
620030
|
| 1244 |
-
777598
|
| 1245 |
-
706089
|
| 1246 |
-
523436
|
| 1247 |
-
447068
|
| 1248 |
-
740285
|
| 1249 |
-
572280
|
| 1250 |
-
772627
|
| 1251 |
-
753956
|
| 1252 |
-
528122
|
| 1253 |
-
588822
|
| 1254 |
-
599880
|
| 1255 |
-
699150
|
| 1256 |
-
433901
|
| 1257 |
-
567134
|
| 1258 |
-
838503
|
| 1259 |
-
751496
|
| 1260 |
-
544867
|
| 1261 |
-
756979
|
| 1262 |
-
343980
|
| 1263 |
-
709145
|
| 1264 |
-
491805
|
| 1265 |
-
831222
|
| 1266 |
-
194758
|
| 1267 |
-
765088
|
| 1268 |
-
516829
|
| 1269 |
-
165231
|
| 1270 |
-
499768
|
| 1271 |
-
735669
|
| 1272 |
-
850411
|
| 1273 |
-
405549
|
| 1274 |
-
804406
|
| 1275 |
-
752903
|
| 1276 |
-
841381
|
| 1277 |
-
794698
|
| 1278 |
-
833250
|
| 1279 |
-
579185
|
| 1280 |
-
524593
|
| 1281 |
-
825366
|
| 1282 |
-
458244
|
| 1283 |
-
815934
|
| 1284 |
-
772913
|
| 1285 |
-
683495
|
| 1286 |
-
584807
|
| 1287 |
-
757426
|
| 1288 |
-
836155
|
| 1289 |
-
835115
|
| 1290 |
-
808149
|
| 1291 |
-
777807
|
| 1292 |
-
695826
|
| 1293 |
-
808334
|
| 1294 |
-
808679
|
| 1295 |
-
828769
|
| 1296 |
-
778433
|
| 1297 |
-
450449
|
| 1298 |
-
836067
|
| 1299 |
-
827948
|
| 1300 |
-
753377
|
| 1301 |
-
799557
|
| 1302 |
-
720331
|
| 1303 |
-
570707
|
| 1304 |
-
798407
|
| 1305 |
-
750244
|
| 1306 |
-
565263
|
| 1307 |
-
777485
|
| 1308 |
-
795890
|
| 1309 |
-
535202
|
| 1310 |
-
837373
|
| 1311 |
-
452341
|
| 1312 |
-
713659
|
| 1313 |
-
746514
|
| 1314 |
-
729526
|
| 1315 |
-
721325
|
| 1316 |
-
689053
|
| 1317 |
-
720771
|
| 1318 |
-
197126
|
| 1319 |
-
735816
|
| 1320 |
-
534917
|
| 1321 |
-
455170
|
| 1322 |
-
833637
|
| 1323 |
-
489544
|
| 1324 |
-
795877
|
| 1325 |
-
849816
|
| 1326 |
-
485751
|
| 1327 |
-
602190
|
| 1328 |
-
733130
|
| 1329 |
-
788490
|
| 1330 |
-
846042
|
| 1331 |
-
846255
|
| 1332 |
-
712508
|
| 1333 |
-
770568
|
| 1334 |
-
824825
|
| 1335 |
-
460591
|
| 1336 |
-
506006
|
| 1337 |
-
751825
|
| 1338 |
-
843299
|
| 1339 |
-
540958
|
| 1340 |
-
849591
|
| 1341 |
-
745552
|
| 1342 |
-
771295
|
| 1343 |
-
728321
|
| 1344 |
-
705989
|
| 1345 |
-
155950
|
| 1346 |
-
443314
|
| 1347 |
-
756951
|
| 1348 |
-
762908
|
| 1349 |
-
849898
|
| 1350 |
-
835359
|
| 1351 |
-
729308
|
| 1352 |
-
304071
|
| 1353 |
-
696399
|
| 1354 |
-
834602
|
| 1355 |
-
569686
|
| 1356 |
-
420919
|
| 1357 |
-
438723
|
| 1358 |
-
391850
|
| 1359 |
-
774233
|
| 1360 |
-
834762
|
| 1361 |
-
729452
|
| 1362 |
-
774969
|
| 1363 |
-
679020
|
| 1364 |
-
715416
|
| 1365 |
-
821849
|
| 1366 |
-
419825
|
| 1367 |
-
827978
|
| 1368 |
-
816552
|
| 1369 |
-
759529
|
| 1370 |
-
797211
|
| 1371 |
-
463580
|
| 1372 |
-
615596
|
| 1373 |
-
801606
|
| 1374 |
-
785093
|
| 1375 |
-
841406
|
| 1376 |
-
661159
|
| 1377 |
-
561392
|
| 1378 |
-
847685
|
| 1379 |
-
822594
|
| 1380 |
-
557748
|
| 1381 |
-
761928
|
| 1382 |
-
841793
|
| 1383 |
-
643369
|
| 1384 |
-
804247
|
| 1385 |
-
782776
|
| 1386 |
-
583802
|
| 1387 |
-
712950
|
| 1388 |
-
575310
|
| 1389 |
-
796615
|
| 1390 |
-
722349
|
| 1391 |
-
781751
|
| 1392 |
-
436524
|
| 1393 |
-
510269
|
| 1394 |
-
437761
|
| 1395 |
-
777042
|
| 1396 |
-
506448
|
| 1397 |
-
258998
|
| 1398 |
-
733687
|
| 1399 |
-
834927
|
| 1400 |
-
457292
|
| 1401 |
-
570132
|
| 1402 |
-
311220
|
| 1403 |
-
551959
|
| 1404 |
-
569995
|
| 1405 |
-
828964
|
| 1406 |
-
413817
|
| 1407 |
-
365300
|
| 1408 |
-
640342
|
| 1409 |
-
408746
|
| 1410 |
-
574908
|
| 1411 |
-
703756
|
| 1412 |
-
841092
|
| 1413 |
-
428366
|
| 1414 |
-
362743
|
| 1415 |
-
779165
|
| 1416 |
-
814151
|
| 1417 |
-
502492
|
| 1418 |
-
680634
|
| 1419 |
-
594887
|
| 1420 |
-
409555
|
| 1421 |
-
733254
|
| 1422 |
-
610485
|
| 1423 |
-
764870
|
| 1424 |
-
534955
|
| 1425 |
-
762394
|
| 1426 |
-
460589
|
| 1427 |
-
687841
|
| 1428 |
-
418079
|
| 1429 |
-
839673
|
| 1430 |
-
434494
|
| 1431 |
-
679834
|
| 1432 |
-
272533
|
| 1433 |
-
434588
|
| 1434 |
-
804302
|
| 1435 |
-
449622
|
| 1436 |
-
775812
|
| 1437 |
-
300120
|
| 1438 |
-
395952
|
| 1439 |
-
155031
|
| 1440 |
-
471907
|
| 1441 |
-
572932
|
| 1442 |
-
359665
|
| 1443 |
-
550289
|
| 1444 |
-
550403
|
| 1445 |
-
431921
|
| 1446 |
-
760602
|
| 1447 |
-
430841
|
| 1448 |
-
366889
|
| 1449 |
-
157681
|
| 1450 |
-
848962
|
| 1451 |
-
826441
|
| 1452 |
-
563822
|
| 1453 |
-
843214
|
| 1454 |
-
553960
|
| 1455 |
-
624883
|
| 1456 |
-
565638
|
| 1457 |
-
577780
|
| 1458 |
-
431172
|
| 1459 |
-
361908
|
| 1460 |
-
773685
|
| 1461 |
-
593330
|
| 1462 |
-
541065
|
| 1463 |
-
736795
|
| 1464 |
-
577891
|
| 1465 |
-
160559
|
| 1466 |
-
293346
|
| 1467 |
-
822731
|
| 1468 |
-
269580
|
| 1469 |
-
499837
|
| 1470 |
-
705653
|
| 1471 |
-
390930
|
| 1472 |
-
157210
|
| 1473 |
-
430128
|
| 1474 |
-
827738
|
| 1475 |
-
542944
|
| 1476 |
-
448815
|
| 1477 |
-
540805
|
| 1478 |
-
619866
|
| 1479 |
-
358346
|
| 1480 |
-
454917
|
| 1481 |
-
164268
|
| 1482 |
-
839565
|
| 1483 |
-
772151
|
| 1484 |
-
405436
|
| 1485 |
-
776012
|
| 1486 |
-
337449
|
| 1487 |
-
514344
|
| 1488 |
-
502224
|
| 1489 |
-
567378
|
| 1490 |
-
596266
|
| 1491 |
-
166418
|
| 1492 |
-
478773
|
| 1493 |
-
805097
|
| 1494 |
-
350812
|
| 1495 |
-
745551
|
| 1496 |
-
457693
|
| 1497 |
-
825654
|
| 1498 |
-
499938
|
| 1499 |
-
438923
|
| 1500 |
-
521837
|
| 1501 |
-
363145
|
| 1502 |
-
773596
|
| 1503 |
-
430777
|
| 1504 |
-
761625
|
| 1505 |
-
698752
|
| 1506 |
-
723477
|
| 1507 |
-
168443
|
| 1508 |
-
495904
|
| 1509 |
-
448159
|
| 1510 |
-
156281
|
| 1511 |
-
393016
|
| 1512 |
-
434645
|
| 1513 |
-
839436
|
| 1514 |
-
327029
|
| 1515 |
-
812542
|
| 1516 |
-
303393
|
| 1517 |
-
845571
|
| 1518 |
-
433623
|
| 1519 |
-
834055
|
| 1520 |
-
775164
|
| 1521 |
-
568650
|
| 1522 |
-
449642
|
| 1523 |
-
747488
|
| 1524 |
-
332951
|
| 1525 |
-
487824
|
| 1526 |
-
686845
|
| 1527 |
-
166894
|
| 1528 |
-
770763
|
| 1529 |
-
851009
|
| 1530 |
-
615852
|
| 1531 |
-
845541
|
| 1532 |
-
817628
|
| 1533 |
-
761792
|
| 1534 |
-
723505
|
| 1535 |
-
767853
|
| 1536 |
-
694782
|
| 1537 |
-
837217
|
| 1538 |
-
842573
|
| 1539 |
-
605551
|
| 1540 |
-
339431
|
| 1541 |
-
771309
|
| 1542 |
-
602919
|
| 1543 |
-
434193
|
| 1544 |
-
759161
|
| 1545 |
-
544248
|
| 1546 |
-
496581
|
| 1547 |
-
648933
|
| 1548 |
-
828083
|
| 1549 |
-
412870
|
| 1550 |
-
840671
|
| 1551 |
-
830250
|
| 1552 |
-
685200
|
| 1553 |
-
830469
|
| 1554 |
-
845839
|
| 1555 |
-
453697
|
| 1556 |
-
827482
|
| 1557 |
-
724288
|
| 1558 |
-
399710
|
| 1559 |
-
825553
|
| 1560 |
-
351893
|
| 1561 |
-
367398
|
| 1562 |
-
844401
|
| 1563 |
-
591452
|
| 1564 |
-
403387
|
| 1565 |
-
344199
|
| 1566 |
-
773663
|
| 1567 |
-
784620
|
| 1568 |
-
689351
|
| 1569 |
-
602059
|
| 1570 |
-
685654
|
| 1571 |
-
841930
|
| 1572 |
-
767634
|
| 1573 |
-
765370
|
| 1574 |
-
569897
|
| 1575 |
-
334395
|
| 1576 |
-
585247
|
| 1577 |
-
670045
|
| 1578 |
-
363151
|
| 1579 |
-
635351
|
| 1580 |
-
547507
|
| 1581 |
-
504921
|
| 1582 |
-
672165
|
| 1583 |
-
523987
|
| 1584 |
-
807279
|
| 1585 |
-
816851
|
| 1586 |
-
758521
|
| 1587 |
-
586662
|
| 1588 |
-
406132
|
| 1589 |
-
795745
|
| 1590 |
-
755621
|
| 1591 |
-
847213
|
| 1592 |
-
486315
|
| 1593 |
-
831696
|
| 1594 |
-
273995
|
| 1595 |
-
843422
|
| 1596 |
-
828454
|
| 1597 |
-
621291
|
| 1598 |
-
642480
|
| 1599 |
-
823558
|
| 1600 |
-
597420
|
| 1601 |
-
725965
|
| 1602 |
-
834417
|
| 1603 |
-
733173
|
| 1604 |
-
687533
|
| 1605 |
-
434024
|
| 1606 |
-
779510
|
| 1607 |
-
155902
|
| 1608 |
-
706661
|
| 1609 |
-
771245
|
| 1610 |
-
613716
|
| 1611 |
-
510306
|
| 1612 |
-
753585
|
| 1613 |
-
472940
|
| 1614 |
-
754376
|
| 1615 |
-
671807
|
| 1616 |
-
775768
|
| 1617 |
-
311394
|
| 1618 |
-
638502
|
| 1619 |
-
675477
|
| 1620 |
-
474162
|
| 1621 |
-
611352
|
| 1622 |
-
773172
|
| 1623 |
-
761951
|
| 1624 |
-
674808
|
| 1625 |
-
308752
|
| 1626 |
-
547174
|
| 1627 |
-
327358
|
| 1628 |
-
558322
|
| 1629 |
-
847738
|
| 1630 |
-
712433
|
| 1631 |
-
531413
|
| 1632 |
-
846014
|
| 1633 |
-
616993
|
| 1634 |
-
709678
|
| 1635 |
-
791903
|
| 1636 |
-
503901
|
| 1637 |
-
734598
|
| 1638 |
-
493008
|
| 1639 |
-
735644
|
| 1640 |
-
737120
|
| 1641 |
-
392563
|
| 1642 |
-
730235
|
| 1643 |
-
626019
|
| 1644 |
-
828974
|
| 1645 |
-
508935
|
| 1646 |
-
798521
|
| 1647 |
-
847092
|
| 1648 |
-
844869
|
| 1649 |
-
601205
|
| 1650 |
-
835032
|
| 1651 |
-
830129
|
| 1652 |
-
657434
|
| 1653 |
-
720864
|
| 1654 |
-
760642
|
| 1655 |
-
606614
|
| 1656 |
-
768254
|
| 1657 |
-
479280
|
| 1658 |
-
785339
|
| 1659 |
-
834105
|
| 1660 |
-
819206
|
| 1661 |
-
772552
|
| 1662 |
-
538198
|
| 1663 |
-
738311
|
| 1664 |
-
443019
|
| 1665 |
-
821509
|
| 1666 |
-
613153
|
| 1667 |
-
774364
|
| 1668 |
-
579087
|
| 1669 |
-
553589
|
| 1670 |
-
729227
|
| 1671 |
-
836426
|
| 1672 |
-
840382
|
| 1673 |
-
404686
|
| 1674 |
-
268652
|
| 1675 |
-
665534
|
| 1676 |
-
732888
|
| 1677 |
-
330883
|
| 1678 |
-
625976
|
| 1679 |
-
841175
|
| 1680 |
-
338514
|
| 1681 |
-
829697
|
| 1682 |
-
511748
|
| 1683 |
-
551622
|
| 1684 |
-
750997
|
| 1685 |
-
849183
|
| 1686 |
-
570855
|
| 1687 |
-
766345
|
| 1688 |
-
154064
|
| 1689 |
-
772310
|
| 1690 |
-
849936
|
| 1691 |
-
615498
|
| 1692 |
-
692115
|
| 1693 |
-
517561
|
| 1694 |
-
417479
|
| 1695 |
-
831200
|
| 1696 |
-
834171
|
| 1697 |
-
746940
|
| 1698 |
-
434913
|
| 1699 |
-
753067
|
| 1700 |
-
299937
|
| 1701 |
-
150674
|
| 1702 |
-
738375
|
| 1703 |
-
336019
|
| 1704 |
-
166687
|
| 1705 |
-
345727
|
| 1706 |
-
360946
|
| 1707 |
-
530905
|
| 1708 |
-
742921
|
| 1709 |
-
436877
|
| 1710 |
-
690061
|
| 1711 |
-
402743
|
| 1712 |
-
339479
|
| 1713 |
-
828216
|
| 1714 |
-
837992
|
| 1715 |
-
154208
|
| 1716 |
-
586160
|
| 1717 |
-
829174
|
| 1718 |
-
570194
|
| 1719 |
-
704866
|
| 1720 |
-
694971
|
| 1721 |
-
338142
|
| 1722 |
-
429780
|
| 1723 |
-
151422
|
| 1724 |
-
473416
|
| 1725 |
-
834159
|
| 1726 |
-
350911
|
| 1727 |
-
732558
|
| 1728 |
-
795909
|
| 1729 |
-
581380
|
| 1730 |
-
523262
|
| 1731 |
-
495509
|
| 1732 |
-
630550
|
| 1733 |
-
397410
|
| 1734 |
-
487279
|
| 1735 |
-
534654
|
| 1736 |
-
532824
|
| 1737 |
-
351531
|
| 1738 |
-
486714
|
| 1739 |
-
461608
|
| 1740 |
-
841522
|
| 1741 |
-
683190
|
| 1742 |
-
618523
|
| 1743 |
-
752213
|
| 1744 |
-
453634
|
| 1745 |
-
415146
|
| 1746 |
-
227791
|
| 1747 |
-
603654
|
| 1748 |
-
777531
|
| 1749 |
-
421667
|
| 1750 |
-
614655
|
| 1751 |
-
612611
|
| 1752 |
-
586305
|
| 1753 |
-
682812
|
| 1754 |
-
173469
|
| 1755 |
-
563880
|
| 1756 |
-
397527
|
| 1757 |
-
414444
|
| 1758 |
-
605840
|
| 1759 |
-
735451
|
| 1760 |
-
502730
|
| 1761 |
-
424688
|
| 1762 |
-
440572
|
| 1763 |
-
460504
|
| 1764 |
-
776378
|
| 1765 |
-
606145
|
| 1766 |
-
678077
|
| 1767 |
-
408619
|
| 1768 |
-
731714
|
| 1769 |
-
652419
|
| 1770 |
-
595248
|
| 1771 |
-
775727
|
| 1772 |
-
340744
|
| 1773 |
-
460354
|
| 1774 |
-
720490
|
| 1775 |
-
607602
|
| 1776 |
-
775761
|
| 1777 |
-
543968
|
| 1778 |
-
767639
|
| 1779 |
-
803730
|
| 1780 |
-
631663
|
| 1781 |
-
149785
|
| 1782 |
-
583545
|
| 1783 |
-
611544
|
| 1784 |
-
489988
|
| 1785 |
-
152922
|
| 1786 |
-
733228
|
| 1787 |
-
591431
|
| 1788 |
-
642203
|
| 1789 |
-
728875
|
| 1790 |
-
543446
|
| 1791 |
-
846297
|
| 1792 |
-
527581
|
| 1793 |
-
744708
|
| 1794 |
-
775564
|
| 1795 |
-
153730
|
| 1796 |
-
166391
|
| 1797 |
-
436116
|
| 1798 |
-
565800
|
| 1799 |
-
152027
|
| 1800 |
-
664178
|
| 1801 |
-
731548
|
| 1802 |
-
760546
|
| 1803 |
-
707788
|
| 1804 |
-
350423
|
| 1805 |
-
626941
|
| 1806 |
-
522563
|
| 1807 |
-
697439
|
| 1808 |
-
779912
|
| 1809 |
-
239933
|
| 1810 |
-
555016
|
| 1811 |
-
496846
|
| 1812 |
-
598598
|
| 1813 |
-
273513
|
| 1814 |
-
659404
|
| 1815 |
-
415055
|
| 1816 |
-
832241
|
| 1817 |
-
721827
|
| 1818 |
-
552338
|
| 1819 |
-
487612
|
| 1820 |
-
386853
|
| 1821 |
-
642602
|
| 1822 |
-
708097
|
| 1823 |
-
361685
|
| 1824 |
-
512935
|
| 1825 |
-
798360
|
| 1826 |
-
823250
|
| 1827 |
-
662149
|
| 1828 |
-
159051
|
| 1829 |
-
826653
|
| 1830 |
-
435323
|
| 1831 |
-
759520
|
| 1832 |
-
813021
|
| 1833 |
-
562437
|
| 1834 |
-
743755
|
| 1835 |
-
752606
|
| 1836 |
-
829857
|
| 1837 |
-
726499
|
| 1838 |
-
846473
|
| 1839 |
-
749831
|
| 1840 |
-
733169
|
| 1841 |
-
636818
|
| 1842 |
-
527840
|
| 1843 |
-
799831
|
| 1844 |
-
748320
|
| 1845 |
-
847682
|
| 1846 |
-
545258
|
| 1847 |
-
390245
|
| 1848 |
-
681315
|
| 1849 |
-
237311
|
| 1850 |
-
214008
|
| 1851 |
-
699302
|
| 1852 |
-
550407
|
| 1853 |
-
608228
|
| 1854 |
-
723193
|
| 1855 |
-
836853
|
| 1856 |
-
591341
|
| 1857 |
-
831587
|
| 1858 |
-
836635
|
| 1859 |
-
435025
|
| 1860 |
-
602017
|
| 1861 |
-
794449
|
| 1862 |
-
776672
|
| 1863 |
-
830742
|
| 1864 |
-
301016
|
| 1865 |
-
424760
|
| 1866 |
-
453648
|
| 1867 |
-
592894
|
| 1868 |
-
849022
|
| 1869 |
-
580504
|
| 1870 |
-
693808
|
| 1871 |
-
765963
|
| 1872 |
-
838409
|
| 1873 |
-
738066
|
| 1874 |
-
524286
|
| 1875 |
-
678550
|
| 1876 |
-
157353
|
| 1877 |
-
768755
|
| 1878 |
-
813988
|
| 1879 |
-
662289
|
| 1880 |
-
428231
|
| 1881 |
-
847714
|
| 1882 |
-
806980
|
| 1883 |
-
797709
|
| 1884 |
-
805026
|
| 1885 |
-
810285
|
| 1886 |
-
442671
|
| 1887 |
-
843939
|
| 1888 |
-
438785
|
| 1889 |
-
827891
|
| 1890 |
-
745141
|
| 1891 |
-
783589
|
| 1892 |
-
354135
|
| 1893 |
-
505785
|
| 1894 |
-
842322
|
| 1895 |
-
840193
|
| 1896 |
-
802536
|
| 1897 |
-
225055
|
| 1898 |
-
850374
|
| 1899 |
-
771838
|
| 1900 |
-
849190
|
| 1901 |
-
831838
|
| 1902 |
-
483096
|
| 1903 |
-
589663
|
| 1904 |
-
836727
|
| 1905 |
-
612081
|
| 1906 |
-
488848
|
| 1907 |
-
677432
|
| 1908 |
-
723228
|
| 1909 |
-
826250
|
| 1910 |
-
781770
|
| 1911 |
-
740321
|
| 1912 |
-
806097
|
| 1913 |
-
438250
|
| 1914 |
-
852480
|
| 1915 |
-
539817
|
| 1916 |
-
826028
|
| 1917 |
-
443305
|
| 1918 |
-
792437
|
| 1919 |
-
823053
|
| 1920 |
-
732914
|
| 1921 |
-
833901
|
| 1922 |
-
639505
|
| 1923 |
-
758194
|
| 1924 |
-
773938
|
| 1925 |
-
846057
|
| 1926 |
-
569667
|
| 1927 |
-
814206
|
| 1928 |
-
758294
|
| 1929 |
-
841927
|
| 1930 |
-
800730
|
| 1931 |
-
760790
|
| 1932 |
-
416653
|
| 1933 |
-
848484
|
| 1934 |
-
430472
|
| 1935 |
-
713877
|
| 1936 |
-
519244
|
| 1937 |
-
661733
|
| 1938 |
-
846455
|
| 1939 |
-
501384
|
| 1940 |
-
686894
|
| 1941 |
-
804882
|
| 1942 |
-
847719
|
| 1943 |
-
842887
|
| 1944 |
-
825646
|
| 1945 |
-
681762
|
| 1946 |
-
774345
|
| 1947 |
-
822383
|
| 1948 |
-
833276
|
| 1949 |
-
800907
|
| 1950 |
-
581711
|
| 1951 |
-
346599
|
| 1952 |
-
753006
|
| 1953 |
-
761578
|
| 1954 |
-
438563
|
| 1955 |
-
305361
|
| 1956 |
-
715812
|
| 1957 |
-
767551
|
| 1958 |
-
846229
|
| 1959 |
-
782383
|
| 1960 |
-
624355
|
| 1961 |
-
845961
|
| 1962 |
-
847356
|
| 1963 |
-
831597
|
| 1964 |
-
768090
|
| 1965 |
-
844093
|
| 1966 |
-
454967
|
| 1967 |
-
706662
|
| 1968 |
-
436816
|
| 1969 |
-
565757
|
| 1970 |
-
739106
|
| 1971 |
-
834225
|
| 1972 |
-
678976
|
| 1973 |
-
735138
|
| 1974 |
-
767954
|
| 1975 |
-
605403
|
| 1976 |
-
439883
|
| 1977 |
-
487610
|
| 1978 |
-
344023
|
| 1979 |
-
201579
|
| 1980 |
-
428351
|
| 1981 |
-
844677
|
| 1982 |
-
823777
|
| 1983 |
-
558391
|
| 1984 |
-
429248
|
| 1985 |
-
829767
|
| 1986 |
-
345217
|
| 1987 |
-
745077
|
| 1988 |
-
681571
|
| 1989 |
-
677235
|
| 1990 |
-
761617
|
| 1991 |
-
802193
|
| 1992 |
-
787066
|
| 1993 |
-
828357
|
| 1994 |
-
487174
|
| 1995 |
-
832681
|
| 1996 |
-
839785
|
| 1997 |
-
320283
|
| 1998 |
-
723502
|
| 1999 |
-
497967
|
| 2000 |
-
421222
|
| 2001 |
-
824102
|
| 2002 |
-
819148
|
| 2003 |
-
582853
|
| 2004 |
-
518576
|
| 2005 |
-
829649
|
| 2006 |
-
492100
|
| 2007 |
-
500499
|
| 2008 |
-
489654
|
| 2009 |
-
834335
|
| 2010 |
-
495488
|
| 2011 |
-
173613
|
| 2012 |
-
457296
|
| 2013 |
-
495411
|
| 2014 |
-
367340
|
| 2015 |
-
754308
|
| 2016 |
-
649386
|
| 2017 |
-
838208
|
| 2018 |
-
435193
|
| 2019 |
-
751668
|
| 2020 |
-
508425
|
| 2021 |
-
784876
|
| 2022 |
-
670452
|
| 2023 |
-
772720
|
| 2024 |
-
529578
|
| 2025 |
-
840644
|
| 2026 |
-
850311
|
| 2027 |
-
675479
|
| 2028 |
-
774553
|
| 2029 |
-
451963
|
| 2030 |
-
460507
|
| 2031 |
-
822336
|
| 2032 |
-
337172
|
| 2033 |
-
588733
|
| 2034 |
-
479100
|
| 2035 |
-
584743
|
| 2036 |
-
358101
|
| 2037 |
-
697219
|
| 2038 |
-
767832
|
| 2039 |
-
387287
|
| 2040 |
-
543271
|
| 2041 |
-
769136
|
| 2042 |
-
436940
|
| 2043 |
-
771675
|
| 2044 |
-
197395
|
| 2045 |
-
502407
|
| 2046 |
-
638623
|
| 2047 |
-
551929
|
| 2048 |
-
511588
|
| 2049 |
-
355258
|
| 2050 |
-
622014
|
| 2051 |
-
567538
|
| 2052 |
-
721641
|
| 2053 |
-
786209
|
| 2054 |
-
462361
|
| 2055 |
-
611162
|
| 2056 |
-
200077
|
| 2057 |
-
154641
|
| 2058 |
-
451339
|
| 2059 |
-
422177
|
| 2060 |
-
839787
|
| 2061 |
-
363004
|
| 2062 |
-
733013
|
| 2063 |
-
303530
|
| 2064 |
-
738813
|
| 2065 |
-
350223
|
| 2066 |
-
820285
|
| 2067 |
-
774426
|
| 2068 |
-
411332
|
| 2069 |
-
165090
|
| 2070 |
-
548821
|
| 2071 |
-
350934
|
| 2072 |
-
521641
|
| 2073 |
-
337528
|
| 2074 |
-
534185
|
| 2075 |
-
508419
|
| 2076 |
-
771475
|
| 2077 |
-
340925
|
| 2078 |
-
516097
|
| 2079 |
-
505395
|
| 2080 |
-
181439
|
| 2081 |
-
659416
|
| 2082 |
-
849275
|
| 2083 |
-
833587
|
| 2084 |
-
675628
|
| 2085 |
-
594853
|
| 2086 |
-
399393
|
| 2087 |
-
173337
|
| 2088 |
-
694966
|
| 2089 |
-
251815
|
| 2090 |
-
152806
|
| 2091 |
-
411509
|
| 2092 |
-
842175
|
| 2093 |
-
812900
|
| 2094 |
-
427090
|
| 2095 |
-
400666
|
| 2096 |
-
606965
|
| 2097 |
-
514131
|
| 2098 |
-
758274
|
| 2099 |
-
841169
|
| 2100 |
-
838958
|
| 2101 |
-
775742
|
| 2102 |
-
403734
|
| 2103 |
-
670441
|
| 2104 |
-
429405
|
| 2105 |
-
388103
|
| 2106 |
-
687017
|
| 2107 |
-
452558
|
| 2108 |
-
616402
|
| 2109 |
-
771734
|
| 2110 |
-
347784
|
| 2111 |
-
472860
|
| 2112 |
-
803139
|
| 2113 |
-
155260
|
| 2114 |
-
776662
|
| 2115 |
-
501247
|
| 2116 |
-
837358
|
| 2117 |
-
346174
|
| 2118 |
-
398106
|
| 2119 |
-
361326
|
| 2120 |
-
591345
|
| 2121 |
-
695256
|
| 2122 |
-
367763
|
| 2123 |
-
285533
|
| 2124 |
-
696714
|
| 2125 |
-
338957
|
| 2126 |
-
387326
|
| 2127 |
-
403138
|
| 2128 |
-
402901
|
| 2129 |
-
418077
|
| 2130 |
-
590550
|
| 2131 |
-
162812
|
| 2132 |
-
740260
|
| 2133 |
-
793770
|
| 2134 |
-
588926
|
| 2135 |
-
438320
|
| 2136 |
-
840501
|
| 2137 |
-
671540
|
| 2138 |
-
503800
|
| 2139 |
-
608408
|
| 2140 |
-
675753
|
| 2141 |
-
366430
|
| 2142 |
-
843878
|
| 2143 |
-
720951
|
| 2144 |
-
834504
|
| 2145 |
-
611997
|
| 2146 |
-
336527
|
| 2147 |
-
755145
|
| 2148 |
-
687090
|
| 2149 |
-
820547
|
| 2150 |
-
715811
|
| 2151 |
-
152025
|
| 2152 |
-
690732
|
| 2153 |
-
840934
|
| 2154 |
-
777617
|
| 2155 |
-
767814
|
| 2156 |
-
571810
|
| 2157 |
-
761256
|
| 2158 |
-
464485
|
| 2159 |
-
718041
|
| 2160 |
-
675190
|
| 2161 |
-
843221
|
| 2162 |
-
488540
|
| 2163 |
-
748647
|
| 2164 |
-
737565
|
| 2165 |
-
528774
|
| 2166 |
-
612808
|
| 2167 |
-
491633
|
| 2168 |
-
735886
|
| 2169 |
-
450070
|
| 2170 |
-
166814
|
| 2171 |
-
406151
|
| 2172 |
-
428664
|
| 2173 |
-
810488
|
| 2174 |
-
539078
|
| 2175 |
-
716250
|
| 2176 |
-
816500
|
| 2177 |
-
572096
|
| 2178 |
-
821456
|
| 2179 |
-
827844
|
| 2180 |
-
358615
|
| 2181 |
-
824552
|
| 2182 |
-
733722
|
| 2183 |
-
836242
|
| 2184 |
-
847033
|
| 2185 |
-
720689
|
| 2186 |
-
318712
|
| 2187 |
-
510920
|
| 2188 |
-
241211
|
| 2189 |
-
387189
|
| 2190 |
-
839879
|
| 2191 |
-
612488
|
| 2192 |
-
754809
|
| 2193 |
-
608229
|
| 2194 |
-
771401
|
| 2195 |
-
830060
|
| 2196 |
-
452575
|
| 2197 |
-
676540
|
| 2198 |
-
457070
|
| 2199 |
-
771930
|
| 2200 |
-
343593
|
| 2201 |
-
154377
|
| 2202 |
-
426837
|
| 2203 |
-
729581
|
| 2204 |
-
825466
|
| 2205 |
-
828371
|
| 2206 |
-
678711
|
| 2207 |
-
510434
|
| 2208 |
-
821268
|
| 2209 |
-
769515
|
| 2210 |
-
827434
|
| 2211 |
-
550520
|
| 2212 |
-
735472
|
| 2213 |
-
495290
|
| 2214 |
-
642142
|
| 2215 |
-
474674
|
| 2216 |
-
505558
|
| 2217 |
-
749131
|
| 2218 |
-
421943
|
| 2219 |
-
615298
|
| 2220 |
-
611813
|
| 2221 |
-
490879
|
| 2222 |
-
506761
|
| 2223 |
-
832580
|
| 2224 |
-
713731
|
| 2225 |
-
663750
|
| 2226 |
-
393174
|
| 2227 |
-
723281
|
| 2228 |
-
415140
|
| 2229 |
-
689986
|
| 2230 |
-
410381
|
| 2231 |
-
823928
|
| 2232 |
-
346396
|
| 2233 |
-
843516
|
| 2234 |
-
617143
|
| 2235 |
-
475199
|
| 2236 |
-
731401
|
| 2237 |
-
720876
|
| 2238 |
-
808551
|
| 2239 |
-
802676
|
| 2240 |
-
843016
|
| 2241 |
-
778134
|
| 2242 |
-
163619
|
| 2243 |
-
333509
|
| 2244 |
-
725662
|
| 2245 |
-
596983
|
| 2246 |
-
211229
|
| 2247 |
-
759823
|
| 2248 |
-
183110
|
| 2249 |
-
491302
|
| 2250 |
-
569608
|
| 2251 |
-
406919
|
| 2252 |
-
363308
|
| 2253 |
-
515922
|
| 2254 |
-
647094
|
| 2255 |
-
512435
|
| 2256 |
-
620953
|
| 2257 |
-
756542
|
| 2258 |
-
718583
|
| 2259 |
-
693893
|
| 2260 |
-
168228
|
| 2261 |
-
849118
|
| 2262 |
-
709275
|
| 2263 |
-
622557
|
| 2264 |
-
773334
|
| 2265 |
-
830022
|
| 2266 |
-
502103
|
| 2267 |
-
575613
|
| 2268 |
-
750161
|
| 2269 |
-
157152
|
| 2270 |
-
779488
|
| 2271 |
-
501046
|
| 2272 |
-
772859
|
| 2273 |
-
361728
|
| 2274 |
-
659697
|
| 2275 |
-
809846
|
| 2276 |
-
786289
|
| 2277 |
-
322751
|
| 2278 |
-
851082
|
| 2279 |
-
840770
|
| 2280 |
-
599699
|
| 2281 |
-
568127
|
| 2282 |
-
829141
|
| 2283 |
-
711809
|
| 2284 |
-
704719
|
| 2285 |
-
743663
|
| 2286 |
-
839993
|
| 2287 |
-
636226
|
| 2288 |
-
769501
|
| 2289 |
-
630823
|
| 2290 |
-
819378
|
| 2291 |
-
364136
|
| 2292 |
-
729248
|
| 2293 |
-
727992
|
| 2294 |
-
609614
|
| 2295 |
-
667746
|
| 2296 |
-
826492
|
| 2297 |
-
420645
|
| 2298 |
-
631742
|
| 2299 |
-
772917
|
| 2300 |
-
751921
|
| 2301 |
-
585898
|
| 2302 |
-
462309
|
| 2303 |
-
785612
|
| 2304 |
-
713323
|
| 2305 |
-
777637
|
| 2306 |
-
363789
|
| 2307 |
-
846456
|
| 2308 |
-
594372
|
| 2309 |
-
347118
|
| 2310 |
-
806693
|
| 2311 |
-
835220
|
| 2312 |
-
826332
|
| 2313 |
-
841134
|
| 2314 |
-
548907
|
| 2315 |
-
827088
|
| 2316 |
-
774271
|
| 2317 |
-
325718
|
| 2318 |
-
346950
|
| 2319 |
-
846158
|
| 2320 |
-
474360
|
| 2321 |
-
621056
|
| 2322 |
-
484010
|
| 2323 |
-
799329
|
| 2324 |
-
444224
|
| 2325 |
-
355946
|
| 2326 |
-
484929
|
| 2327 |
-
510299
|
| 2328 |
-
271870
|
| 2329 |
-
398816
|
| 2330 |
-
510122
|
| 2331 |
-
511999
|
| 2332 |
-
576646
|
| 2333 |
-
152332
|
| 2334 |
-
408937
|
| 2335 |
-
521758
|
| 2336 |
-
775032
|
| 2337 |
-
722477
|
| 2338 |
-
165310
|
| 2339 |
-
580513
|
| 2340 |
-
519939
|
| 2341 |
-
318944
|
| 2342 |
-
693062
|
| 2343 |
-
731291
|
| 2344 |
-
848780
|
| 2345 |
-
775955
|
| 2346 |
-
472237
|
| 2347 |
-
820198
|
| 2348 |
-
835683
|
| 2349 |
-
547760
|
| 2350 |
-
648097
|
| 2351 |
-
343936
|
| 2352 |
-
777941
|
| 2353 |
-
522897
|
| 2354 |
-
518144
|
| 2355 |
-
394353
|
| 2356 |
-
568211
|
| 2357 |
-
769152
|
| 2358 |
-
153759
|
| 2359 |
-
773007
|
| 2360 |
-
588551
|
| 2361 |
-
737786
|
| 2362 |
-
411322
|
| 2363 |
-
451734
|
| 2364 |
-
774654
|
| 2365 |
-
618016
|
| 2366 |
-
543893
|
| 2367 |
-
414790
|
| 2368 |
-
577017
|
| 2369 |
-
706018
|
| 2370 |
-
520311
|
| 2371 |
-
822740
|
| 2372 |
-
461517
|
| 2373 |
-
347050
|
| 2374 |
-
205106
|
| 2375 |
-
593906
|
| 2376 |
-
763988
|
| 2377 |
-
510621
|
| 2378 |
-
294981
|
| 2379 |
-
643795
|
| 2380 |
-
603892
|
| 2381 |
-
845705
|
| 2382 |
-
630891
|
| 2383 |
-
743236
|
| 2384 |
-
775712
|
| 2385 |
-
428381
|
| 2386 |
-
417382
|
| 2387 |
-
343237
|
| 2388 |
-
637494
|
| 2389 |
-
350535
|
| 2390 |
-
701582
|
| 2391 |
-
757525
|
| 2392 |
-
773072
|
| 2393 |
-
479456
|
| 2394 |
-
602171
|
| 2395 |
-
623149
|
| 2396 |
-
706580
|
| 2397 |
-
834629
|
| 2398 |
-
405881
|
| 2399 |
-
159060
|
| 2400 |
-
606365
|
| 2401 |
-
834514
|
| 2402 |
-
842375
|
| 2403 |
-
510559
|
| 2404 |
-
849945
|
| 2405 |
-
849094
|
| 2406 |
-
754685
|
| 2407 |
-
490135
|
| 2408 |
-
388292
|
| 2409 |
-
622761
|
| 2410 |
-
769160
|
| 2411 |
-
834816
|
| 2412 |
-
632928
|
| 2413 |
-
782359
|
| 2414 |
-
334975
|
| 2415 |
-
754544
|
| 2416 |
-
690976
|
| 2417 |
-
433374
|
| 2418 |
-
678519
|
| 2419 |
-
730623
|
| 2420 |
-
514396
|
| 2421 |
-
541303
|
| 2422 |
-
605460
|
| 2423 |
-
687273
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Data/not_active_drumeo_camp.csv
DELETED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Dockerfile
DELETED
|
@@ -1,37 +0,0 @@
|
|
| 1 |
-
# ---------------------------------------------------------------------
|
| 2 |
-
# Base image – use the full tag so `wget` is available for the steps
|
| 3 |
-
FROM python:3.9
|
| 4 |
-
|
| 5 |
-
# ---------------------------------------------------------------------
|
| 6 |
-
# 1. Create UID-1000 account *and its home directory*.
|
| 7 |
-
RUN useradd -m -u 1000 user
|
| 8 |
-
|
| 9 |
-
# Environment: declare the home dir now (some HF-injected commands
|
| 10 |
-
# look at $HOME) but stay root for the next layers.
|
| 11 |
-
ENV HOME=/home/user \
|
| 12 |
-
PYTHONUNBUFFERED=1 \
|
| 13 |
-
PIP_NO_CACHE_DIR=1 \
|
| 14 |
-
PATH="$HOME/.local/bin:$PATH"
|
| 15 |
-
|
| 16 |
-
# ---------------------------------------------------------------------
|
| 17 |
-
# 2. Install Python dependencies **as root** so the console scripts
|
| 18 |
-
# land in /usr/local/bin (already on PATH at runtime).
|
| 19 |
-
WORKDIR /app
|
| 20 |
-
COPY requirements.txt /tmp/reqs.txt
|
| 21 |
-
RUN pip install --no-cache-dir -r /tmp/reqs.txt \
|
| 22 |
-
&& rm /tmp/reqs.txt
|
| 23 |
-
|
| 24 |
-
# ---------------------------------------------------------------------
|
| 25 |
-
# 3. Switch to the non-root user for the final image,
|
| 26 |
-
# then copy the source tree.
|
| 27 |
-
USER user
|
| 28 |
-
WORKDIR $HOME/app
|
| 29 |
-
COPY --chown=user . .
|
| 30 |
-
|
| 31 |
-
# ---------------------------------------------------------------------
|
| 32 |
-
# 4. Launch: $PORT is set by the platform at runtime; fall back to 8501
|
| 33 |
-
# for local docker runs.
|
| 34 |
-
CMD streamlit run app.py \
|
| 35 |
-
--server.port=${PORT:-8501} \
|
| 36 |
-
--server.headless true \
|
| 37 |
-
--server.address 0.0.0.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Messaging_system/Message_generator_2.py
DELETED
|
@@ -1,253 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
THis class will generate message or messages based on the number of requested.
|
| 3 |
-
"""
|
| 4 |
-
import json
|
| 5 |
-
import time
|
| 6 |
-
from openai import OpenAI
|
| 7 |
-
from tqdm import tqdm
|
| 8 |
-
import streamlit as st
|
| 9 |
-
|
| 10 |
-
from Messaging_system.MultiMessage_2 import MultiMessage
|
| 11 |
-
from Messaging_system.protection_layer import ProtectionLayer
|
| 12 |
-
import openai
|
| 13 |
-
from Messaging_system.LLM import LLM
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
class MessageGenerator:
|
| 17 |
-
|
| 18 |
-
def __init__(self, CoreConfig):
|
| 19 |
-
self.Core = CoreConfig
|
| 20 |
-
self.llm = LLM(CoreConfig)
|
| 21 |
-
|
| 22 |
-
# --------------------------------------------------------------
|
| 23 |
-
# --------------------------------------------------------------
|
| 24 |
-
def generate_messages(self, progress_callback):
|
| 25 |
-
"""
|
| 26 |
-
generating messages based on prompts for each user
|
| 27 |
-
:return: updating message column for each user
|
| 28 |
-
"""
|
| 29 |
-
|
| 30 |
-
total_users = len(self.Core.users_df)
|
| 31 |
-
st.write("Generating messages ... ")
|
| 32 |
-
|
| 33 |
-
self.Core.start_time = time.time()
|
| 34 |
-
for progress, (idx, row) in enumerate(tqdm(self.Core.users_df.iterrows(), desc="generating messages")):
|
| 35 |
-
# if we have a prompt to generate a personalized message
|
| 36 |
-
# Update progress if callback is provided
|
| 37 |
-
if progress_callback is not None:
|
| 38 |
-
progress_callback(progress, total_users)
|
| 39 |
-
|
| 40 |
-
if row["prompt"] is not None:
|
| 41 |
-
first_message = self.llm.get_response(prompt=row["prompt"], instructions=self.llm_instructions())
|
| 42 |
-
|
| 43 |
-
if first_message is not None:
|
| 44 |
-
# adding protection layer
|
| 45 |
-
# protect = ProtectionLayer(CoreConfig=self.Core)
|
| 46 |
-
# message, total_tokens = protect.criticize(message=first_message, user=row)
|
| 47 |
-
message = first_message
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
# updating tokens
|
| 51 |
-
# self.Core.total_tokens['prompt_tokens'] += total_tokens['prompt_tokens']
|
| 52 |
-
# self.Core.total_tokens['completion_tokens'] += total_tokens['completion_tokens']
|
| 53 |
-
# self.Core.temp_token_counter += total_tokens['prompt_tokens'] + total_tokens['completion_tokens']
|
| 54 |
-
|
| 55 |
-
# double check output structure
|
| 56 |
-
if isinstance(message, dict) and "message" in message and isinstance(message["message"], str):
|
| 57 |
-
# parsing output result
|
| 58 |
-
message = self.parsing_output_message(message, row)
|
| 59 |
-
self.Core.users_df.at[idx, "message"] = message
|
| 60 |
-
row["message"] = message
|
| 61 |
-
else:
|
| 62 |
-
self.Core.users_df.at[idx, "message"] = None
|
| 63 |
-
self.Core.checkpoint()
|
| 64 |
-
self.Core.respect_request_ratio()
|
| 65 |
-
else:
|
| 66 |
-
self.Core.users_df.at[idx, "message"] = None
|
| 67 |
-
|
| 68 |
-
# generating subsequence messages if needed:
|
| 69 |
-
if isinstance(self.Core.subsequence_messages, dict) and len(self.Core.subsequence_messages.keys()) > 1 and \
|
| 70 |
-
self.Core.users_df.at[idx, "message"] is not None and row["message"] is not None:
|
| 71 |
-
MM = MultiMessage(self.Core)
|
| 72 |
-
message = MM.generate_multi_messages(row)
|
| 73 |
-
self.Core.users_df.at[idx, "message"] = message
|
| 74 |
-
|
| 75 |
-
else:
|
| 76 |
-
# ---------------------------------------------------------
|
| 77 |
-
# SINGLE-MESSAGE path
|
| 78 |
-
# ---------------------------------------------------------
|
| 79 |
-
single_msg = row["message"] or self.Core.users_df.at[idx, "message"]
|
| 80 |
-
if single_msg is not None:
|
| 81 |
-
# If the single message is still a JSON string, turn it into a dict first
|
| 82 |
-
if isinstance(single_msg, str):
|
| 83 |
-
try:
|
| 84 |
-
single_msg = json.loads(single_msg)
|
| 85 |
-
except json.JSONDecodeError:
|
| 86 |
-
# leave it as-is if it’s not valid JSON
|
| 87 |
-
pass
|
| 88 |
-
|
| 89 |
-
msg_wrapper = {"messages_sequence": [single_msg]}
|
| 90 |
-
# Again, store a proper JSON string
|
| 91 |
-
self.Core.users_df.at[idx, "message"] = json.dumps(msg_wrapper,
|
| 92 |
-
ensure_ascii=False)
|
| 93 |
-
|
| 94 |
-
else:
|
| 95 |
-
self.Core.users_df.at[idx, "message"] = None
|
| 96 |
-
|
| 97 |
-
return self.Core
|
| 98 |
-
|
| 99 |
-
# --------------------------------------------------------------
|
| 100 |
-
# --------------------------------------------------------------
|
| 101 |
-
def parsing_output_message(self, message, user):
|
| 102 |
-
"""
|
| 103 |
-
Parses the output JSON from the LLM and enriches it with additional content information if needed.
|
| 104 |
-
|
| 105 |
-
:param message: Output JSON from LLM (expected to have at least a "message" key)
|
| 106 |
-
:param user: The user row
|
| 107 |
-
:return: Parsed and enriched output as a JSON object
|
| 108 |
-
"""
|
| 109 |
-
if self.Core.messaging_mode == "message":
|
| 110 |
-
# Only "message" is expected when messaging mode is message and we are not recommending any other content from input
|
| 111 |
-
if "message" not in message or "header" not in message:
|
| 112 |
-
print("LLM output is missing 'message'.")
|
| 113 |
-
return None
|
| 114 |
-
|
| 115 |
-
output_message = {"header": message["header"], "message": message["message"], "web_url_path": user["recsys_result"]}
|
| 116 |
-
|
| 117 |
-
if self.Core.messaging_mode == "recsys_result":
|
| 118 |
-
output_message = self.fetch_recommendation_data(user, message)
|
| 119 |
-
elif self.Core.messaging_mode == "recommend_playlist":
|
| 120 |
-
# adding playlist url to the message
|
| 121 |
-
if "playlist_id" in message and "message" in message:
|
| 122 |
-
playlist_id = str(message["playlist_id"])
|
| 123 |
-
web_url_path = f"https://www.musora.com/{self.Core.brand}/playlist/{playlist_id}"
|
| 124 |
-
# Add these to the message dict
|
| 125 |
-
output_message = {
|
| 126 |
-
"header": message["header"],
|
| 127 |
-
"message": message["message"],
|
| 128 |
-
"playlist_id": int(message["playlist_id"]),
|
| 129 |
-
"web_url_path": web_url_path,
|
| 130 |
-
}
|
| 131 |
-
|
| 132 |
-
return json.dumps(output_message, ensure_ascii=False)
|
| 133 |
-
|
| 134 |
-
# --------------------------------------------------------------
|
| 135 |
-
# --------------------------------------------------------------
|
| 136 |
-
def fetch_recommendation_data(self, user, message):
|
| 137 |
-
|
| 138 |
-
user_id = user["user_id"]
|
| 139 |
-
content_id = int(user["recommendation"])
|
| 140 |
-
recsys_json_str = user["recsys_result"]
|
| 141 |
-
recsys_data = json.loads(recsys_json_str)
|
| 142 |
-
|
| 143 |
-
# Initialize variables to store found item and category
|
| 144 |
-
found_item = None
|
| 145 |
-
|
| 146 |
-
# Search through all categories in the recsys data
|
| 147 |
-
for category, items in recsys_data.items():
|
| 148 |
-
for item in items:
|
| 149 |
-
if item.get("content_id") == content_id:
|
| 150 |
-
found_item = item
|
| 151 |
-
break # Exit inner loop if item is found
|
| 152 |
-
if found_item:
|
| 153 |
-
break # Exit outer loop if item is found
|
| 154 |
-
|
| 155 |
-
if not found_item:
|
| 156 |
-
print(f"content_id {content_id} not found in recsys_data for user_id {user_id}.")
|
| 157 |
-
return None
|
| 158 |
-
|
| 159 |
-
# Extract required fields from found_item
|
| 160 |
-
web_url_path = found_item.get("web_url_path")
|
| 161 |
-
title = found_item.get("title")
|
| 162 |
-
thumbnail_url = found_item.get("thumbnail_url")
|
| 163 |
-
|
| 164 |
-
message["message"].replace('\\', '').replace('"', '')
|
| 165 |
-
|
| 166 |
-
# Add these to the message dict
|
| 167 |
-
output_message = {
|
| 168 |
-
"header": message.get("header"),
|
| 169 |
-
"message": message.get("message"),
|
| 170 |
-
"content_id": content_id,
|
| 171 |
-
"web_url_path": web_url_path,
|
| 172 |
-
"title": title,
|
| 173 |
-
"thumbnail_url": thumbnail_url
|
| 174 |
-
}
|
| 175 |
-
return output_message
|
| 176 |
-
|
| 177 |
-
# --------------------------------------------------------------
|
| 178 |
-
# --------------------------------------------------------------
|
| 179 |
-
def llm_instructions(self):
|
| 180 |
-
"""
|
| 181 |
-
Setting instructions for llm
|
| 182 |
-
:return: instructions as string
|
| 183 |
-
"""
|
| 184 |
-
|
| 185 |
-
jargon_list = "\n".join(f"- {word}" for word in self.Core.config_file["AI_Jargon"])
|
| 186 |
-
|
| 187 |
-
# instructions = f"""
|
| 188 |
-
# You are a copywriter. Your task is to write a 'header' and a 'message' as a push notification for a {self.Core.get_instrument()} student that sounds like natural everyday speech: friendly, concise, no jargon, and following the instructions.
|
| 189 |
-
# Write a SUPER CASUAL and NATURAL push notification, as if you are chatting over coffee. Avoid odd phrasings.
|
| 190 |
-
#
|
| 191 |
-
# ABSOLUTE RULE – OVERRIDES EVERYTHING ELSE:
|
| 192 |
-
# the header and the message **MUST NOT** contain any banned word or phrases(case-insensitive; singular, plural, verb forms, or their derivatives)
|
| 193 |
-
#
|
| 194 |
-
# Banned word:
|
| 195 |
-
# {jargon_list}
|
| 196 |
-
#
|
| 197 |
-
# Banned phrases:
|
| 198 |
-
# Voice is NOT an instrument, so avoid phrases like below:
|
| 199 |
-
# - Your voice is waiting
|
| 200 |
-
# - Your voice awaits
|
| 201 |
-
# - Your voice needs you
|
| 202 |
-
# - Your voice is calling
|
| 203 |
-
# - Your voice deserves more
|
| 204 |
-
# - Hit the high notes / Hit those notes
|
| 205 |
-
# - ...
|
| 206 |
-
# """
|
| 207 |
-
|
| 208 |
-
instructions = f"""
|
| 209 |
-
|
| 210 |
-
You are a copywriter. Your task is to write a 'header' and a 'message' as a push notification for a {self.Core.get_instrument()} student. It is critical that the message sounds like natural, everyday speech: friendly, concise, no jargon, and it must follow the instructions.
|
| 211 |
-
Write a SUPER CASUAL and NATURAL push notification, as if you are chatting over coffee. Avoid odd phrasings. The message should sound like something that a {self.Core.get_instrument()} instructor would realistically say to a student in a daily conversation. Here are some examples of things that an instructor would realistically say to a student, to give you a general sense of tone and phrasing:
|
| 212 |
-
|
| 213 |
-
Common instructor phrases:
|
| 214 |
-
{self.Core.brand_voice}
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
ABSOLUTE RULE – OVERRIDES EVERYTHING ELSE:
|
| 218 |
-
the header and the message **MUST NOT** contain any banned word or phrases(case-insensitive; singular, plural, verb forms, or their derivatives)
|
| 219 |
-
|
| 220 |
-
Banned word:
|
| 221 |
-
{jargon_list}
|
| 222 |
-
|
| 223 |
-
Banned phrases:
|
| 224 |
-
Voice is NOT an instrument, so avoid phrases like below:
|
| 225 |
-
- Your voice is waiting
|
| 226 |
-
- Your voice awaits
|
| 227 |
-
- Your voice needs you
|
| 228 |
-
- Your voice is calling
|
| 229 |
-
- Your voice deserves more
|
| 230 |
-
- Hit the high notes / Hit those notes
|
| 231 |
-
- ...
|
| 232 |
-
|
| 233 |
-
"""
|
| 234 |
-
|
| 235 |
-
banned = """
|
| 236 |
-
ABSOLUTE RULE – OVERRIDES EVERYTHING ELSE:
|
| 237 |
-
the header and the message **MUST NOT** contain any banned word or phrases(case-insensitive; singular, plural, verb forms, or their derivatives)
|
| 238 |
-
|
| 239 |
-
Banned word:
|
| 240 |
-
{jargon_list}
|
| 241 |
-
|
| 242 |
-
Banned phrases:
|
| 243 |
-
Voice is NOT an instrument, so avoid phrases like below:
|
| 244 |
-
- Your voice is waiting
|
| 245 |
-
- Your voice awaits
|
| 246 |
-
- Your voice needs you
|
| 247 |
-
- Your voice is calling
|
| 248 |
-
- Your voice deserves more
|
| 249 |
-
- Hit the high notes / Hit those notes
|
| 250 |
-
- ...
|
| 251 |
-
"""
|
| 252 |
-
|
| 253 |
-
return instructions
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Messaging_system/MultiMessage.py
DELETED
|
@@ -1,406 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import random
|
| 3 |
-
import time
|
| 4 |
-
import pandas as pd
|
| 5 |
-
from openai import OpenAI
|
| 6 |
-
from Messaging_system.LLMR import LLMR
|
| 7 |
-
from Messaging_system.PromptGenerator import PromptGenerator
|
| 8 |
-
from Messaging_system.PromptEng import PromptEngine
|
| 9 |
-
from Messaging_system.protection_layer import ProtectionLayer
|
| 10 |
-
import openai
|
| 11 |
-
from Messaging_system.LLM import LLM
|
| 12 |
-
from copy import deepcopy
|
| 13 |
-
from Messaging_system.Homepage_Recommender import DefaultRec
|
| 14 |
-
|
| 15 |
-
class MultiMessage:
|
| 16 |
-
def __init__(self, CoreConfig):
|
| 17 |
-
"""
|
| 18 |
-
Class that generates a sequence of messages (multi-step push notifications)
|
| 19 |
-
for each user, building on previously generated messages.
|
| 20 |
-
"""
|
| 21 |
-
self.Core = CoreConfig
|
| 22 |
-
self.llm = LLM(CoreConfig)
|
| 23 |
-
self.defaultRec = DefaultRec(CoreConfig)
|
| 24 |
-
self.promptGen=PromptGenerator(self.Core)
|
| 25 |
-
|
| 26 |
-
if self.Core.involve_recsys_result:
|
| 27 |
-
self.llmr = LLMR(CoreConfig, random=True)
|
| 28 |
-
|
| 29 |
-
# ==============================================================
|
| 30 |
-
def generate_multi_messages(self, user):
|
| 31 |
-
"""
|
| 32 |
-
Generates multiple messages per user, storing them in a single JSON structure.
|
| 33 |
-
The first message is assumed to already exist in user["message"].
|
| 34 |
-
Subsequent messages are generated by referencing all previously generated ones.
|
| 35 |
-
"""
|
| 36 |
-
first_message_str = user.get("message", None)
|
| 37 |
-
if first_message_str is None:
|
| 38 |
-
print("No initial message found; cannot build a multi-message sequence.")
|
| 39 |
-
return None
|
| 40 |
-
|
| 41 |
-
try:
|
| 42 |
-
first_message_dict = json.loads(first_message_str)
|
| 43 |
-
except (json.JSONDecodeError, TypeError):
|
| 44 |
-
print("Could not parse the first message as JSON. Returning None.")
|
| 45 |
-
return None
|
| 46 |
-
|
| 47 |
-
message_sequence = [first_message_dict]
|
| 48 |
-
|
| 49 |
-
# how many total messages you want (self.Core.subsequence_messages is a dict)
|
| 50 |
-
total_configured = len(self.Core.subsequent_examples) + 1 # includes the first
|
| 51 |
-
to_generate = max(0, total_configured - 1)
|
| 52 |
-
|
| 53 |
-
# figure out DF index once
|
| 54 |
-
idx = self._get_user_idx(user)
|
| 55 |
-
|
| 56 |
-
for i in range(to_generate):
|
| 57 |
-
# The ordinal number of the next message in the sequence (first was #1)
|
| 58 |
-
msg_number = i + 2
|
| 59 |
-
|
| 60 |
-
# ---- (A) pick the next recommendation BEFORE generating the text if required ----
|
| 61 |
-
recommendation_info = content_info = recsys_json = None
|
| 62 |
-
zero_tokens = {"prompt_tokens": 0, "completion_tokens": 0}
|
| 63 |
-
|
| 64 |
-
if getattr(self.Core, "involve_recsys_result", False):
|
| 65 |
-
rec_info, cinfo, rjson = self.select_next_recommendation(user)
|
| 66 |
-
recommendation_info, content_info, recsys_json = rec_info, cinfo, rjson
|
| 67 |
-
|
| 68 |
-
if recommendation_info is None:
|
| 69 |
-
# fallback
|
| 70 |
-
content_id = self.defaultRec.recommendation
|
| 71 |
-
content_info = self.defaultRec.recommendation_info
|
| 72 |
-
recsys_json = self.defaultRec.for_you_url
|
| 73 |
-
# Update DF and local user snapshot
|
| 74 |
-
user = self._update_user_fields(idx, user,{
|
| 75 |
-
"recommendation": recommendation_info,
|
| 76 |
-
"recommendation_info": content_info,
|
| 77 |
-
"recsys_result": recsys_json
|
| 78 |
-
})
|
| 79 |
-
|
| 80 |
-
# ---- (B) actually generate the next message; hand it the UPDATED user ----
|
| 81 |
-
next_msg_raw = self.generate_next_messages(message_sequence, msg_number, user)
|
| 82 |
-
if next_msg_raw is None:
|
| 83 |
-
print(f"Could not generate the message for step {msg_number}. Stopping.")
|
| 84 |
-
break
|
| 85 |
-
|
| 86 |
-
# If you have a protection layer, call it here (omitted for brevity)
|
| 87 |
-
criticized_msg = next_msg_raw
|
| 88 |
-
|
| 89 |
-
# ---- (C) Parse & validate ----
|
| 90 |
-
parsed_output_str = self.parsing_output_message(criticized_msg, user)
|
| 91 |
-
if not parsed_output_str:
|
| 92 |
-
print(f"Parsing output failed for step {msg_number}. Stopping.")
|
| 93 |
-
break
|
| 94 |
-
|
| 95 |
-
try:
|
| 96 |
-
parsed_output_dict = json.loads(parsed_output_str)
|
| 97 |
-
except json.JSONDecodeError:
|
| 98 |
-
print(f"Could not parse the new message as JSON for step {msg_number}. Stopping.")
|
| 99 |
-
break
|
| 100 |
-
|
| 101 |
-
message_sequence.append(parsed_output_dict)
|
| 102 |
-
|
| 103 |
-
final_structure = {"messages_sequence": message_sequence}
|
| 104 |
-
return json.dumps(final_structure, ensure_ascii=False)
|
| 105 |
-
|
| 106 |
-
# --------------------------------------------------------------
|
| 107 |
-
def generate_next_messages(self, previous_messages, step, user):
|
| 108 |
-
"""
|
| 109 |
-
Uses only the last two previously generated messages to produce the next message.
|
| 110 |
-
Returns a *raw* dictionary (header, message, etc.) from the LLM.
|
| 111 |
-
|
| 112 |
-
:param previous_messages: A list of dicts, each containing at least "header" and "message".
|
| 113 |
-
:param step: The 1-based index of the message we’re about to generate.
|
| 114 |
-
:return: A dictionary from LLM (with 'header' and 'message'), or None if generation fails.
|
| 115 |
-
"""
|
| 116 |
-
# Only keep up to the last two messages
|
| 117 |
-
if len(previous_messages) > 2:
|
| 118 |
-
context = previous_messages[-2:]
|
| 119 |
-
else:
|
| 120 |
-
context = previous_messages
|
| 121 |
-
|
| 122 |
-
# 1) Build a prompt that includes only those last two messages
|
| 123 |
-
prompt = self.generate_prompt(context, step, user)
|
| 124 |
-
|
| 125 |
-
# new_prompt = self.engine.prompt_engineering(prompt)
|
| 126 |
-
|
| 127 |
-
# 2) Call our existing LLM routine
|
| 128 |
-
response_dict = self.llm.get_response(prompt=prompt, instructions=self.llm_instructions())
|
| 129 |
-
|
| 130 |
-
return response_dict
|
| 131 |
-
|
| 132 |
-
# ===============================================================
|
| 133 |
-
def get_examples(self, step):
|
| 134 |
-
"""
|
| 135 |
-
providing examples and instructions
|
| 136 |
-
:return:
|
| 137 |
-
"""
|
| 138 |
-
|
| 139 |
-
if self.Core.subsequent_examples is not None:
|
| 140 |
-
|
| 141 |
-
instructions = f"""
|
| 142 |
-
# ** Example **
|
| 143 |
-
Below are some acceptable examples of the voice we want. Create a header and message that follow the same style, tone, vocabulary, and characteristics.
|
| 144 |
-
Mimic the example style as much as possible and make it personalized using provided information.
|
| 145 |
-
|
| 146 |
-
### **Good Examples:**
|
| 147 |
-
|
| 148 |
-
{self.Core.subsequent_examples[step]}
|
| 149 |
-
"""
|
| 150 |
-
return instructions
|
| 151 |
-
else:
|
| 152 |
-
return ""
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
# --------------------------------------------------------------
|
| 156 |
-
def generate_prompt(self, previous_messages, step, user):
|
| 157 |
-
"""
|
| 158 |
-
Creates a prompt to feed to the LLM, incorporating 3 previously generated messages.
|
| 159 |
-
|
| 160 |
-
:param previous_messages: A list of dicts, each containing 'header' and 'message'.
|
| 161 |
-
:return: A user-facing prompt string instructing the model to produce a new message.
|
| 162 |
-
"""
|
| 163 |
-
# Build a textual summary of previous messages - last three
|
| 164 |
-
recent_messages = previous_messages[-3:]
|
| 165 |
-
|
| 166 |
-
previous_text = []
|
| 167 |
-
for i, m in enumerate(recent_messages, start=1):
|
| 168 |
-
header = m.get("header", "").strip()
|
| 169 |
-
body = m.get("message", "").strip()
|
| 170 |
-
previous_text.append(f"Message {i}:\n header: {header}\n message: {body}")
|
| 171 |
-
|
| 172 |
-
# Combine into a single string
|
| 173 |
-
previous_text_str = "\n\n".join(previous_text)
|
| 174 |
-
|
| 175 |
-
user_info = self.promptGen.get_user_profile(user=user)
|
| 176 |
-
input_context = self.promptGen.input_context()
|
| 177 |
-
instructions = self.message_instructions(step)
|
| 178 |
-
recommendation_instructions = self.promptGen.recommendations_instructions(user)
|
| 179 |
-
output_instructions = self.promptGen.output_instruction()
|
| 180 |
-
examples = self.get_examples(step)
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
# Craft the prompt
|
| 184 |
-
prompt = f"""
|
| 185 |
-
We have previously sent these push notifications to the user and The user has not re-engaged yet:
|
| 186 |
-
|
| 187 |
-
** Previously sent push notifications: **
|
| 188 |
-
{previous_text_str}
|
| 189 |
-
|
| 190 |
-
** Objective**:
|
| 191 |
-
Write the *next* follow up personalized push notification following the instructions and what we know about the user.
|
| 192 |
-
|
| 193 |
-
{input_context}
|
| 194 |
-
- **Don't** use any emojis if we used emojis in previous messages.
|
| 195 |
-
- The "header" **MUST BE** different from the headers that we previously sent and we should not have similar words, variations and phrases from previous messages.
|
| 196 |
-
- The "message" **MUST BE** different from the messages that we previously sent and we should not have similar words, variations and phrases from previous messages.
|
| 197 |
-
|
| 198 |
-
{instructions}
|
| 199 |
-
|
| 200 |
-
{user_info}
|
| 201 |
-
|
| 202 |
-
{recommendation_instructions}
|
| 203 |
-
|
| 204 |
-
{examples}
|
| 205 |
-
|
| 206 |
-
{output_instructions}
|
| 207 |
-
"""
|
| 208 |
-
|
| 209 |
-
return prompt
|
| 210 |
-
|
| 211 |
-
# =============================================================
|
| 212 |
-
def message_instructions(self, step):
|
| 213 |
-
"""
|
| 214 |
-
message instructions for each step
|
| 215 |
-
:return:
|
| 216 |
-
"""
|
| 217 |
-
instructions= f"""
|
| 218 |
-
# **specific instructions**:
|
| 219 |
-
- {self.Core.subsequence_messages[step]}
|
| 220 |
-
"""
|
| 221 |
-
return instructions
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
# =============================================================================
|
| 225 |
-
def parsing_output_message(self, message, user):
|
| 226 |
-
"""
|
| 227 |
-
Parses the output JSON from the LLM and enriches it with additional content
|
| 228 |
-
information if needed (e.g., from recsys). Re-uses the logic from the single-message
|
| 229 |
-
pipeline to keep the results consistent.
|
| 230 |
-
|
| 231 |
-
:param message: Output JSON *dictionary* from the LLM (with at least "message" and "header").
|
| 232 |
-
:param user: The user row dictionary.
|
| 233 |
-
:return: A valid JSON string or None if the structure is invalid.
|
| 234 |
-
"""
|
| 235 |
-
if self.Core.involve_recsys_result:
|
| 236 |
-
# If recsys is used, fetch recommendation data
|
| 237 |
-
output_message = self.fetch_recommendation_data(user, message)
|
| 238 |
-
elif self.Core.messaging_mode == "recommend_playlist":
|
| 239 |
-
# If recommending a playlist, add the relevant fields
|
| 240 |
-
if "playlist_id" in message and "message" in message:
|
| 241 |
-
playlist_id = str(message["playlist_id"])
|
| 242 |
-
web_url_path = f"https://www.musora.com/{self.Core.brand}/playlist/{playlist_id}"
|
| 243 |
-
output_message = {
|
| 244 |
-
"header": message.get("header", ""),
|
| 245 |
-
"message": message.get("message", ""),
|
| 246 |
-
"playlist_id": int(message["playlist_id"]),
|
| 247 |
-
"web_url_path": web_url_path,
|
| 248 |
-
}
|
| 249 |
-
else:
|
| 250 |
-
print("LLM output is missing either 'playlist_id' or 'message'.")
|
| 251 |
-
return None
|
| 252 |
-
else:
|
| 253 |
-
# Basic scenario: Only 'header' and 'message' expected
|
| 254 |
-
if "message" not in message or "header" not in message:
|
| 255 |
-
print("LLM output is missing 'header' or 'message'.")
|
| 256 |
-
return None
|
| 257 |
-
output_message = {
|
| 258 |
-
"header": message["header"],
|
| 259 |
-
"message": message["message"]
|
| 260 |
-
}
|
| 261 |
-
|
| 262 |
-
return json.dumps(output_message, ensure_ascii=False)
|
| 263 |
-
|
| 264 |
-
# --------------------------------------------------------------
|
| 265 |
-
def fetch_recommendation_data(self, user, message):
|
| 266 |
-
|
| 267 |
-
if user["recommendation"] == "for_you":
|
| 268 |
-
output_message = {
|
| 269 |
-
"header": message.get("header"),
|
| 270 |
-
"message": message.get("message"),
|
| 271 |
-
"content_id": None,
|
| 272 |
-
"web_url_path": user["recsys_result"],
|
| 273 |
-
"title": user["recommendation"],
|
| 274 |
-
"thumbnail_url": None
|
| 275 |
-
}
|
| 276 |
-
else:
|
| 277 |
-
try:
|
| 278 |
-
recommendation_dict = user["recommendation"]
|
| 279 |
-
content_id = int(recommendation_dict["content_id"])
|
| 280 |
-
|
| 281 |
-
# Extract required fields from found_item
|
| 282 |
-
web_url_path = recommendation_dict["web_url_path"]
|
| 283 |
-
title = recommendation_dict["title"]
|
| 284 |
-
thumbnail_url = recommendation_dict["thumbnail_url"]
|
| 285 |
-
|
| 286 |
-
msg = message.get("message")
|
| 287 |
-
if isinstance(msg, str):
|
| 288 |
-
msg = msg.replace('\\', '').replace('"', '')
|
| 289 |
-
else:
|
| 290 |
-
msg = str(msg) # or handle it differently if this shouldn't happen
|
| 291 |
-
message["message"] = msg
|
| 292 |
-
|
| 293 |
-
# message["message"].replace('\\', '').replace('"', '')
|
| 294 |
-
|
| 295 |
-
# Add these to the message dict
|
| 296 |
-
output_message = {
|
| 297 |
-
"header": message.get("header"),
|
| 298 |
-
"message": message.get("message"),
|
| 299 |
-
"content_id": content_id,
|
| 300 |
-
"web_url_path": web_url_path,
|
| 301 |
-
"title": title,
|
| 302 |
-
"thumbnail_url": thumbnail_url
|
| 303 |
-
}
|
| 304 |
-
return output_message
|
| 305 |
-
except:
|
| 306 |
-
raise ValueError(f"Parsed response is not a dict: \n\n {message}")
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
# ===============================================================
|
| 310 |
-
def _remove_from_all(self, recsys_dict, cid):
|
| 311 |
-
for sec, recs in list(recsys_dict.items()):
|
| 312 |
-
if isinstance(recs, list):
|
| 313 |
-
recsys_dict[sec] = [r for r in recs if r.get("content_id") != cid]
|
| 314 |
-
return recsys_dict
|
| 315 |
-
|
| 316 |
-
# ===============================================================
|
| 317 |
-
def _lookup_content_info(self, cid):
|
| 318 |
-
row = self.Core.content_info[self.Core.content_info["content_id"] == cid]
|
| 319 |
-
return row["content_info"].iloc[0] if not row.empty else None
|
| 320 |
-
|
| 321 |
-
# ===============================================================
|
| 322 |
-
|
| 323 |
-
def select_next_recommendation(self, user):
|
| 324 |
-
"""
|
| 325 |
-
Select next recommendation from the user's current recsys_result.
|
| 326 |
-
Returns: content_id, content_info, updated_recsys_json
|
| 327 |
-
"""
|
| 328 |
-
self.llmr.user = user # _get_recommendation expects self.user to be set
|
| 329 |
-
cid, cinfo, updated_json, _ = self.llmr._get_recommendation()
|
| 330 |
-
return cid, cinfo, updated_json
|
| 331 |
-
|
| 332 |
-
# ==============================================================
|
| 333 |
-
def _get_user_idx(self, u):
|
| 334 |
-
# If it's a Series, its index label is usually the row index
|
| 335 |
-
if isinstance(u, pd.Series) and u.name in self.Core.users_df.index:
|
| 336 |
-
return u.name
|
| 337 |
-
# Otherwise try a stable key like user_id (change if your key is different)
|
| 338 |
-
key_col = "user_id" if "user_id" in self.Core.users_df.columns else None
|
| 339 |
-
if key_col and key_col in u:
|
| 340 |
-
matches = self.Core.users_df.index[self.Core.users_df[key_col] == u[key_col]]
|
| 341 |
-
if len(matches):
|
| 342 |
-
return matches[0]
|
| 343 |
-
# Fallback: try exact row equality (last resort; slower)
|
| 344 |
-
try:
|
| 345 |
-
return self.Core.users_df.index[self.Core.users_df.eq(pd.Series(u)).all(1)][0]
|
| 346 |
-
except Exception:
|
| 347 |
-
return None
|
| 348 |
-
# =============================================================
|
| 349 |
-
def _update_user_fields(self, idx, user, fields: dict):
|
| 350 |
-
"""Update DF row and return a fresh copy of the user row (Series) with those fields reflected."""
|
| 351 |
-
if idx is None:
|
| 352 |
-
# no index? just mutate the local dict/Series
|
| 353 |
-
for k, v in fields.items():
|
| 354 |
-
user[k] = v
|
| 355 |
-
return user
|
| 356 |
-
for k, v in fields.items():
|
| 357 |
-
self.Core.users_df.at[idx, k] = v
|
| 358 |
-
return self.Core.users_df.loc[idx]
|
| 359 |
-
|
| 360 |
-
# =======================================================================
|
| 361 |
-
|
| 362 |
-
def llm_instructions(self):
|
| 363 |
-
"""
|
| 364 |
-
Setting instructions for llm
|
| 365 |
-
:return: instructions as string
|
| 366 |
-
"""
|
| 367 |
-
|
| 368 |
-
jargon_list = "\n".join(f"- {word}" for word in self.Core.config_file["AI_Jargon"])
|
| 369 |
-
banned_phrases = self.Core.config_file.get(f"{self.Core.brand}_banned_phrases", None)
|
| 370 |
-
|
| 371 |
-
instructions = f"""
|
| 372 |
-
You are a copywriter. Your task is to write a 'header' and a 'message' as a push notification for a {self.Core.get_instrument()} student. It is critical that the message sounds like natural, everyday speech: friendly, concise, no jargon, and it must follow the instructions.
|
| 373 |
-
Write a SUPER CASUAL and NATURAL push notification, as if you are chatting over coffee. Avoid odd phrasings. The message should sound like something that a {self.Core.get_instrument()} instructor would realistically say to a student in a daily conversation.
|
| 374 |
-
|
| 375 |
-
"""
|
| 376 |
-
if self.Core.brand_voice is not None:
|
| 377 |
-
instructions += f"""
|
| 378 |
-
Here are some examples of things that an instructor would realistically say to a student, to give you a general sense of tone and phrasing:
|
| 379 |
-
|
| 380 |
-
Common instructor phrases:
|
| 381 |
-
{self.Core.brand_voice}
|
| 382 |
-
"""
|
| 383 |
-
|
| 384 |
-
rules = f"""
|
| 385 |
-
ABSOLUTE RULE – OVERRIDES EVERYTHING ELSE:
|
| 386 |
-
the header and the message **MUST NOT** contain any banned word or phrases(case-insensitive; singular, plural, verb forms, or their derivatives)
|
| 387 |
-
- **important Note:** header **must be** less than {self.Core.config_file["header_limit"]} characters and message **must be less** than {self.Core.config_file["message_limit"]} characters.
|
| 388 |
-
- The "header" **MUST BE** different from the headers that we previously sent and we should not have similar words, variations and phrases from previous messages.
|
| 389 |
-
- The "message" **MUST BE** different from the messages that we previously sent and we should not have similar words, variations and phrases from previous messages.
|
| 390 |
-
|
| 391 |
-
|
| 392 |
-
Banned word:
|
| 393 |
-
{jargon_list}
|
| 394 |
-
|
| 395 |
-
"""
|
| 396 |
-
if banned_phrases is not None:
|
| 397 |
-
rules += banned_phrases
|
| 398 |
-
|
| 399 |
-
final_instructions = f"""
|
| 400 |
-
{instructions}
|
| 401 |
-
{rules}
|
| 402 |
-
"""
|
| 403 |
-
|
| 404 |
-
return final_instructions
|
| 405 |
-
|
| 406 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Messaging_system/MultiMessage_2.py
DELETED
|
@@ -1,412 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import time
|
| 3 |
-
from openai import OpenAI
|
| 4 |
-
from Messaging_system.PromptGenerator_2 import PromptGenerator
|
| 5 |
-
from Messaging_system.PromptEng import PromptEngine
|
| 6 |
-
from Messaging_system.protection_layer import ProtectionLayer
|
| 7 |
-
import openai
|
| 8 |
-
from Messaging_system.LLM import LLM
|
| 9 |
-
|
| 10 |
-
class MultiMessage:
|
| 11 |
-
def __init__(self, CoreConfig):
|
| 12 |
-
"""
|
| 13 |
-
Class that generates a sequence of messages (multi-step push notifications)
|
| 14 |
-
for each user, building on previously generated messages.
|
| 15 |
-
"""
|
| 16 |
-
self.Core = CoreConfig
|
| 17 |
-
self.llm = LLM(CoreConfig)
|
| 18 |
-
self.engine = PromptEngine(self.Core)
|
| 19 |
-
self.promptGen = PromptGenerator(self.Core)
|
| 20 |
-
|
| 21 |
-
# --------------------------------------------------------------
|
| 22 |
-
def generate_multi_messages(self, user):
|
| 23 |
-
"""
|
| 24 |
-
Generates multiple messages per user, storing them in a single JSON structure.
|
| 25 |
-
The first message is assumed to already exist in user["message"].
|
| 26 |
-
Subsequent messages are generated by referencing all previously generated ones.
|
| 27 |
-
|
| 28 |
-
:param user: A row (dictionary-like) containing user data and the first message.
|
| 29 |
-
:return: JSON string containing the entire sequence of messages
|
| 30 |
-
(or None if something goes wrong).
|
| 31 |
-
"""
|
| 32 |
-
# 1) Get the first message if it exists
|
| 33 |
-
first_message_str = user.get("message", None)
|
| 34 |
-
if not first_message_str:
|
| 35 |
-
print("No initial message found; cannot build a multi-message sequence.")
|
| 36 |
-
return None
|
| 37 |
-
|
| 38 |
-
# Parse the first message as JSON
|
| 39 |
-
try:
|
| 40 |
-
first_message_dict = json.loads(first_message_str)
|
| 41 |
-
except (json.JSONDecodeError, TypeError):
|
| 42 |
-
print("Could not parse the first message as JSON. Returning None.")
|
| 43 |
-
return None
|
| 44 |
-
|
| 45 |
-
# Start our sequence with the first message
|
| 46 |
-
message_sequence = [first_message_dict]
|
| 47 |
-
|
| 48 |
-
# We'll reuse the same ProtectionLayer
|
| 49 |
-
# protect = ProtectionLayer(
|
| 50 |
-
# CoreConfig=self.Core
|
| 51 |
-
# )
|
| 52 |
-
|
| 53 |
-
# If user requested multiple messages, generate the rest
|
| 54 |
-
# number_of_messages is the *total* number of messages requested
|
| 55 |
-
total_to_generate = len(self.Core.subsequence_messages.keys())
|
| 56 |
-
|
| 57 |
-
# Already have the first message, so generate the next (n-1) messages
|
| 58 |
-
for step in range(1, total_to_generate + 1):
|
| 59 |
-
# 2) Generate the next message referencing all so-far messages
|
| 60 |
-
next_msg_raw = self.generate_next_messages(message_sequence, step+1, user)
|
| 61 |
-
if not next_msg_raw:
|
| 62 |
-
print(f"Could not generate the message for step {step}. Stopping.")
|
| 63 |
-
break
|
| 64 |
-
|
| 65 |
-
# 3) Pass it through the protection layer
|
| 66 |
-
# criticized_msg, tokens_used = protect.criticize(
|
| 67 |
-
# message=next_msg_raw,
|
| 68 |
-
# user=user
|
| 69 |
-
# )
|
| 70 |
-
criticized_msg = next_msg_raw
|
| 71 |
-
|
| 72 |
-
# Update token usage stats
|
| 73 |
-
# self.Core.total_tokens['prompt_tokens'] += tokens_used['prompt_tokens']
|
| 74 |
-
# self.Core.total_tokens['completion_tokens'] += tokens_used['completion_tokens']
|
| 75 |
-
# self.Core.temp_token_counter += tokens_used['prompt_tokens'] + tokens_used['completion_tokens']
|
| 76 |
-
|
| 77 |
-
# 4) Parse & validate the next message (we do the same as the single-message pipeline)
|
| 78 |
-
parsed_output_str = self.parsing_output_message(criticized_msg, user)
|
| 79 |
-
if not parsed_output_str:
|
| 80 |
-
print(f"Parsing output failed for step {step}. Stopping.")
|
| 81 |
-
break
|
| 82 |
-
|
| 83 |
-
try:
|
| 84 |
-
parsed_output_dict = json.loads(parsed_output_str)
|
| 85 |
-
except json.JSONDecodeError:
|
| 86 |
-
print(f"Could not parse the new message as JSON for step {step}. Stopping.")
|
| 87 |
-
break
|
| 88 |
-
|
| 89 |
-
# Add this next message to our sequence
|
| 90 |
-
message_sequence.append(parsed_output_dict)
|
| 91 |
-
|
| 92 |
-
# 5) Return the entire sequence so it can be stored back in the DataFrame or elsewhere
|
| 93 |
-
final_structure = {"messages_sequence": message_sequence}
|
| 94 |
-
return json.dumps(final_structure, ensure_ascii=False)
|
| 95 |
-
|
| 96 |
-
# --------------------------------------------------------------
|
| 97 |
-
def generate_next_messages(self, previous_messages, step, user):
|
| 98 |
-
"""
|
| 99 |
-
Uses only the last two previously generated messages to produce the next message.
|
| 100 |
-
Returns a *raw* dictionary (header, message, etc.) from the LLM.
|
| 101 |
-
|
| 102 |
-
:param previous_messages: A list of dicts, each containing at least "header" and "message".
|
| 103 |
-
:param step: The 1-based index of the message we’re about to generate.
|
| 104 |
-
:return: A dictionary from LLM (with 'header' and 'message'), or None if generation fails.
|
| 105 |
-
"""
|
| 106 |
-
# Only keep up to the last two messages
|
| 107 |
-
if len(previous_messages) > 2:
|
| 108 |
-
context = previous_messages[-2:]
|
| 109 |
-
else:
|
| 110 |
-
context = previous_messages
|
| 111 |
-
|
| 112 |
-
# 1) Build a prompt that includes only those last two messages
|
| 113 |
-
prompt = self.generate_prompt(context, step, user)
|
| 114 |
-
|
| 115 |
-
# new_prompt = self.engine.prompt_engineering(prompt)
|
| 116 |
-
|
| 117 |
-
# 2) Call our existing LLM routine
|
| 118 |
-
response_dict = self.llm.get_response(prompt=prompt, instructions=self.llm_instructions())
|
| 119 |
-
|
| 120 |
-
return response_dict
|
| 121 |
-
|
| 122 |
-
# ===============================================================
|
| 123 |
-
def get_examples(self, step):
|
| 124 |
-
"""
|
| 125 |
-
providing examples and instructions
|
| 126 |
-
:return:
|
| 127 |
-
"""
|
| 128 |
-
|
| 129 |
-
if self.Core.subsequent_examples is not None:
|
| 130 |
-
|
| 131 |
-
instructions = f"""
|
| 132 |
-
# ** Example **
|
| 133 |
-
Below are some acceptable examples of the voice we want. Create a header and message that follow the same style, tone, vocabulary, and characteristics.
|
| 134 |
-
Mimic the example style as much as possible and make it personalized using provided information.
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
### **Good Examples:**
|
| 138 |
-
|
| 139 |
-
{self.Core.subsequent_examples[step]}
|
| 140 |
-
"""
|
| 141 |
-
return instructions
|
| 142 |
-
else:
|
| 143 |
-
return ""
|
| 144 |
-
|
| 145 |
-
# --------------------------------------------------------------
|
| 146 |
-
def generate_prompt(self, previous_messages, step, user):
|
| 147 |
-
"""
|
| 148 |
-
Creates a prompt to feed to the LLM, incorporating 3 previously generated messages.
|
| 149 |
-
|
| 150 |
-
:param previous_messages: A list of dicts, each containing 'header' and 'message'.
|
| 151 |
-
:return: A user-facing prompt string instructing the model to produce a new message.
|
| 152 |
-
"""
|
| 153 |
-
# Build a textual summary of previous messages - last three
|
| 154 |
-
recent_messages = previous_messages[-3:]
|
| 155 |
-
|
| 156 |
-
previous_text = []
|
| 157 |
-
for i, m in enumerate(recent_messages, start=1):
|
| 158 |
-
header = m.get("header", "").strip()
|
| 159 |
-
body = m.get("message", "").strip()
|
| 160 |
-
previous_text.append(f"Message {i}: (Header) {header}\n (Body) {body}")
|
| 161 |
-
|
| 162 |
-
# Combine into a single string
|
| 163 |
-
previous_text_str = "\n\n".join(previous_text)
|
| 164 |
-
|
| 165 |
-
user_info = self.promptGen.get_user_profile(user=user)
|
| 166 |
-
input_context = self.promptGen.input_context()
|
| 167 |
-
output_instructions = self.output_instruction()
|
| 168 |
-
general_specifications = self.general_specifications()
|
| 169 |
-
|
| 170 |
-
examples = self.get_examples(step)
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
# Craft the prompt
|
| 174 |
-
prompt = f"""
|
| 175 |
-
We have previously sent these push notifications to the user and The user has not re-engaged yet:
|
| 176 |
-
|
| 177 |
-
** Previous messages **
|
| 178 |
-
{previous_text_str}
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
**Objective**
|
| 182 |
-
Write the *next* follow up personalized push notification following the instructions and what we know about the user.
|
| 183 |
-
{input_context}
|
| 184 |
-
|
| 185 |
-
{user_info}
|
| 186 |
-
|
| 187 |
-
### ** General Specifications: **
|
| 188 |
-
|
| 189 |
-
{general_specifications}
|
| 190 |
-
|
| 191 |
-
# **specific instructions**:
|
| 192 |
-
- {self.Core.subsequence_messages[step]}
|
| 193 |
-
|
| 194 |
-
{examples}
|
| 195 |
-
|
| 196 |
-
{output_instructions}
|
| 197 |
-
"""
|
| 198 |
-
|
| 199 |
-
return prompt
|
| 200 |
-
# ===========================================================================
|
| 201 |
-
def general_specifications(self):
|
| 202 |
-
"""
|
| 203 |
-
general_specifications
|
| 204 |
-
:return: instructions
|
| 205 |
-
"""
|
| 206 |
-
|
| 207 |
-
instructions = """
|
| 208 |
-
- Start directly with the message content without greetings or closing phrases.
|
| 209 |
-
- Avoid using same or similar words so close together in "message" and "header", and make sure there is no grammar problem.
|
| 210 |
-
- message and header **MUST** be different from previous messages in terms of similar words, vocabulary and phrases.
|
| 211 |
-
- The message, vocabulary and sentences **MUST** sound like a natural conversation: something that people normally say in daily conversations.
|
| 212 |
-
|
| 213 |
-
"""
|
| 214 |
-
return instructions
|
| 215 |
-
|
| 216 |
-
# =============================================================================
|
| 217 |
-
|
| 218 |
-
def output_instruction(self):
|
| 219 |
-
"""
|
| 220 |
-
:return: output instructions as a string
|
| 221 |
-
"""
|
| 222 |
-
|
| 223 |
-
# Provide constraints for our next push notification
|
| 224 |
-
header_limit = self.Core.config_file.get("header_limit", 50)
|
| 225 |
-
message_limit = self.Core.config_file.get("message_limit", 200)
|
| 226 |
-
|
| 227 |
-
general_instructions = f"""
|
| 228 |
-
- The "header" must be less than {header_limit} character.
|
| 229 |
-
- The "message" must be less than {message_limit} character.
|
| 230 |
-
- Don't use emoji if we used emoji in our previous messages.,
|
| 231 |
-
- if we didn't have emojis in previous message, you are **ONLY ALLOWED** to use {self.Core.get_emoji()} emoji if needed. (ONLY ONCE, and ONLY at the end of header or message).
|
| 232 |
-
- Ensure that the output is a valid JSON and not include any text outside the JSON code block.
|
| 233 |
-
"""
|
| 234 |
-
|
| 235 |
-
instructions = f"""
|
| 236 |
-
Expected output structure:
|
| 237 |
-
|
| 238 |
-
{{
|
| 239 |
-
"header": "Generated title",
|
| 240 |
-
"message": "Generated message",
|
| 241 |
-
}}
|
| 242 |
-
|
| 243 |
-
{general_instructions}
|
| 244 |
-
"""
|
| 245 |
-
return instructions
|
| 246 |
-
|
| 247 |
-
# --------------------------------------------------------------
|
| 248 |
-
def parsing_output_message(self, message, user):
|
| 249 |
-
"""
|
| 250 |
-
Parses the output JSON from the LLM and enriches it with additional content
|
| 251 |
-
information if needed (e.g., from recsys). Re-uses the logic from the single-message
|
| 252 |
-
pipeline to keep the results consistent.
|
| 253 |
-
|
| 254 |
-
:param message: Output JSON *dictionary* from the LLM (with at least "message" and "header").
|
| 255 |
-
:param user: The user row dictionary.
|
| 256 |
-
:return: A valid JSON string or None if the structure is invalid.
|
| 257 |
-
"""
|
| 258 |
-
if self.Core.involve_recsys_result:
|
| 259 |
-
# If recsys is used, fetch recommendation data
|
| 260 |
-
output_message = self.fetch_recommendation_data(user, message)
|
| 261 |
-
elif self.Core.messaging_mode == "recommend_playlist":
|
| 262 |
-
# If recommending a playlist, add the relevant fields
|
| 263 |
-
if "playlist_id" in message and "message" in message:
|
| 264 |
-
playlist_id = str(message["playlist_id"])
|
| 265 |
-
web_url_path = f"https://www.musora.com/{self.Core.brand}/playlist/{playlist_id}"
|
| 266 |
-
output_message = {
|
| 267 |
-
"header": message.get("header", ""),
|
| 268 |
-
"message": message.get("message", ""),
|
| 269 |
-
"playlist_id": int(message["playlist_id"]),
|
| 270 |
-
"web_url_path": web_url_path,
|
| 271 |
-
}
|
| 272 |
-
else:
|
| 273 |
-
print("LLM output is missing either 'playlist_id' or 'message'.")
|
| 274 |
-
return None
|
| 275 |
-
else:
|
| 276 |
-
# Basic scenario: Only 'header' and 'message' expected
|
| 277 |
-
if "message" not in message or "header" not in message:
|
| 278 |
-
print("LLM output is missing 'header' or 'message'.")
|
| 279 |
-
return None
|
| 280 |
-
output_message = {
|
| 281 |
-
"header": message["header"],
|
| 282 |
-
"message": message["message"]
|
| 283 |
-
}
|
| 284 |
-
|
| 285 |
-
return json.dumps(output_message, ensure_ascii=False)
|
| 286 |
-
|
| 287 |
-
# --------------------------------------------------------------
|
| 288 |
-
def fetch_recommendation_data(self, user, message):
|
| 289 |
-
"""
|
| 290 |
-
Extracts recommendation data from user's recsys_result and merges it into the given
|
| 291 |
-
message dictionary. Identical to single-message usage.
|
| 292 |
-
|
| 293 |
-
:param user: The user row (with 'recsys_result', 'recommendation', etc.).
|
| 294 |
-
:param message: Dictionary with at least "header" and "message".
|
| 295 |
-
:return: Enriched dict (header, message, content_id, web_url_path, title, thumbnail_url)
|
| 296 |
-
"""
|
| 297 |
-
user_id = user["user_id"]
|
| 298 |
-
content_id = int(user["recommendation"])
|
| 299 |
-
recsys_json_str = user["recsys_result"]
|
| 300 |
-
recsys_data = json.loads(recsys_json_str)
|
| 301 |
-
|
| 302 |
-
# Initialize variable to store found item
|
| 303 |
-
found_item = None
|
| 304 |
-
for category, items in recsys_data.items():
|
| 305 |
-
for item in items:
|
| 306 |
-
if item.get("content_id") == content_id:
|
| 307 |
-
found_item = item
|
| 308 |
-
break
|
| 309 |
-
if found_item:
|
| 310 |
-
break
|
| 311 |
-
|
| 312 |
-
if not found_item:
|
| 313 |
-
print(f"content_id {content_id} not found in recsys_data for user_id {user_id}.")
|
| 314 |
-
return None
|
| 315 |
-
|
| 316 |
-
web_url_path = found_item.get("web_url_path")
|
| 317 |
-
title = found_item.get("title")
|
| 318 |
-
thumbnail_url = found_item.get("thumbnail_url")
|
| 319 |
-
|
| 320 |
-
# Construct final dictionary
|
| 321 |
-
output_message = {
|
| 322 |
-
"header": message.get("header"),
|
| 323 |
-
"message": message.get("message", "").replace('\\', '').replace('"', ''),
|
| 324 |
-
"content_id": content_id,
|
| 325 |
-
"web_url_path": web_url_path,
|
| 326 |
-
"title": title,
|
| 327 |
-
"thumbnail_url": thumbnail_url
|
| 328 |
-
}
|
| 329 |
-
return output_message
|
| 330 |
-
|
| 331 |
-
# --------------------------------------------------------------
|
| 332 |
-
# --------------------------------------------------------------
|
| 333 |
-
|
| 334 |
-
def llm_instructions(self):
|
| 335 |
-
"""
|
| 336 |
-
Setting instructions for llm
|
| 337 |
-
:return: instructions as string
|
| 338 |
-
"""
|
| 339 |
-
|
| 340 |
-
jargon_list = "\n".join(f"- {word}" for word in self.Core.config_file["AI_Jargon"])
|
| 341 |
-
|
| 342 |
-
# instructions = f"""
|
| 343 |
-
# You are a copywriter. Your task is to write a 'header' and a 'message' as a push notification for a {self.Core.get_instrument()} student that sounds like natural everyday speech: friendly, concise, no jargon, and following the instructions.
|
| 344 |
-
# Write a SUPER CASUAL and NATURAL push notification, as if you are chatting over coffee. Avoid odd phrasings.
|
| 345 |
-
#
|
| 346 |
-
# ABSOLUTE RULE – OVERRIDES EVERYTHING ELSE:
|
| 347 |
-
# the header and the message **MUST NOT** contain any banned word or phrases (case-insensitive; singular, plural, verb forms, or their derivatives)
|
| 348 |
-
#
|
| 349 |
-
# Banned word:
|
| 350 |
-
# {jargon_list}
|
| 351 |
-
#
|
| 352 |
-
# Banned phrases:
|
| 353 |
-
# Voice is NOT an instrument, so avoid phrases like below:
|
| 354 |
-
# - Your voice is waiting
|
| 355 |
-
# - Your voice awaits
|
| 356 |
-
# - Your voice needs you
|
| 357 |
-
# - Your voice is calling
|
| 358 |
-
# - Your voice deserves more
|
| 359 |
-
# - Hit the high notes / Hit those notes
|
| 360 |
-
# - ...
|
| 361 |
-
#
|
| 362 |
-
# """.strip()
|
| 363 |
-
|
| 364 |
-
|
| 365 |
-
instructions = f"""
|
| 366 |
-
|
| 367 |
-
You are a copywriter. Your task is to write a 'header' and a 'message' as a push notification for a {self.Core.get_instrument()} student. It is critical that the message sounds like natural, everyday speech: friendly, concise, no jargon, and it must follow the instructions.
|
| 368 |
-
Write a SUPER CASUAL and NATURAL push notification, as if you are chatting over coffee. Avoid odd phrasings. The message should sound like something that a {self.Core.get_instrument()} instructor would realistically say to a student. Here are some examples of things that an instructor would realistically say to a student, to give you a general sense of tone and phrasing:
|
| 369 |
-
|
| 370 |
-
Common instructor phrases:
|
| 371 |
-
{self.Core.brand_voice}
|
| 372 |
-
|
| 373 |
-
ABSOLUTE RULE – OVERRIDES EVERYTHING ELSE:
|
| 374 |
-
the header and the message **MUST NOT** contain any banned word or phrases(case-insensitive; singular, plural, verb forms, or their derivatives)
|
| 375 |
-
|
| 376 |
-
Banned word:
|
| 377 |
-
{jargon_list}
|
| 378 |
-
|
| 379 |
-
Banned phrases:
|
| 380 |
-
Voice is NOT an instrument, so avoid phrases like below:
|
| 381 |
-
- Your voice is waiting
|
| 382 |
-
- Your voice awaits
|
| 383 |
-
- Your voice needs you
|
| 384 |
-
- Your voice is calling
|
| 385 |
-
- Your voice deserves more
|
| 386 |
-
- Hit the high notes / Hit those notes
|
| 387 |
-
- ...
|
| 388 |
-
|
| 389 |
-
"""
|
| 390 |
-
|
| 391 |
-
banned = """
|
| 392 |
-
ABSOLUTE RULE – OVERRIDES EVERYTHING ELSE:
|
| 393 |
-
the header and the message **MUST NOT** contain any banned word or phrases(case-insensitive; singular, plural, verb forms, or their derivatives)
|
| 394 |
-
|
| 395 |
-
Banned word:
|
| 396 |
-
{jargon_list}
|
| 397 |
-
|
| 398 |
-
Banned phrases:
|
| 399 |
-
Voice is NOT an instrument, so avoid phrases like below:
|
| 400 |
-
- Your voice is waiting
|
| 401 |
-
- Your voice awaits
|
| 402 |
-
- Your voice needs you
|
| 403 |
-
- Your voice is calling
|
| 404 |
-
- Your voice deserves more
|
| 405 |
-
- Hit the high notes / Hit those notes
|
| 406 |
-
- ...
|
| 407 |
-
|
| 408 |
-
"""
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
return instructions
|
| 412 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Messaging_system/Permes.py
DELETED
|
@@ -1,202 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
the flow of the Program starts from create_personalized_message function
|
| 3 |
-
"""
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
import time
|
| 7 |
-
from tqdm import tqdm
|
| 8 |
-
from Messaging_system.DataCollector import DataCollector
|
| 9 |
-
from Messaging_system.CoreConfig import CoreConfig
|
| 10 |
-
from Messaging_system.LLMR import LLMR
|
| 11 |
-
import streamlit as st
|
| 12 |
-
from Messaging_system.Message_generator import MessageGenerator
|
| 13 |
-
from Messaging_system.PromptGenerator import PromptGenerator
|
| 14 |
-
from Messaging_system.SnowFlakeConnection import SnowFlakeConn
|
| 15 |
-
from Messaging_system.Homepage_Recommender import DefaultRec
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
class Permes:
|
| 20 |
-
"""
|
| 21 |
-
LLM-based personalized message generator:
|
| 22 |
-
"""
|
| 23 |
-
|
| 24 |
-
def create_personalize_messages(self, session, users, brand, config_file, openai_api_key,
|
| 25 |
-
platform="push", number_of_messages=1, instructionset=None, subsequent_examples=None
|
| 26 |
-
, recsys_contents=None, model=None, identifier_column="user_id", segment_info=None,
|
| 27 |
-
sample_example=None, number_of_samples=None, message_style=None, involve_recsys_result=False,
|
| 28 |
-
messaging_mode="message", ongoing_df=None, personalization=True,
|
| 29 |
-
progress_callback=None, segment_name="no_recent_activity"):
|
| 30 |
-
"""
|
| 31 |
-
creating personalized messages for the input users given the parameters for both app and push platform.
|
| 32 |
-
:param session: snowflake connection object
|
| 33 |
-
:param users: users dataframe
|
| 34 |
-
:param brand
|
| 35 |
-
:param config_file
|
| 36 |
-
:param openai_api_key
|
| 37 |
-
:param CTA: call to action for the messages
|
| 38 |
-
:param segment_info: common information about the users
|
| 39 |
-
:param message_style: style of the message
|
| 40 |
-
:param sample_example: a sample for one shot prompting
|
| 41 |
-
:return:
|
| 42 |
-
"""
|
| 43 |
-
|
| 44 |
-
# primary processing
|
| 45 |
-
users = self.identify_users(users_df=users, identifier_column=identifier_column)
|
| 46 |
-
|
| 47 |
-
personalize_message = CoreConfig(session=session,
|
| 48 |
-
users_df=users,
|
| 49 |
-
brand=brand,
|
| 50 |
-
platform=platform,
|
| 51 |
-
config_file=config_file)
|
| 52 |
-
|
| 53 |
-
personalize_message.set_openai_api(openai_api_key)
|
| 54 |
-
personalize_message.set_segment_name(segment_name=segment_name)
|
| 55 |
-
personalize_message.set_number_of_messages(number_of_messages=number_of_messages,
|
| 56 |
-
instructionset=instructionset,
|
| 57 |
-
subsequent_examples=subsequent_examples)
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
if sample_example is not None: # Check if sample_example is not empty
|
| 61 |
-
personalize_message.set_sample_example(sample_example)
|
| 62 |
-
|
| 63 |
-
if number_of_samples is not None:
|
| 64 |
-
personalize_message.set_number_of_samples(number_of_samples)
|
| 65 |
-
|
| 66 |
-
if model is not None:
|
| 67 |
-
personalize_message.set_llm_model(model)
|
| 68 |
-
|
| 69 |
-
if segment_info is not None:
|
| 70 |
-
personalize_message.set_segment_info(segment_info)
|
| 71 |
-
|
| 72 |
-
if message_style is not None or message_style != "":
|
| 73 |
-
personalize_message.set_message_style(message_style)
|
| 74 |
-
|
| 75 |
-
if personalization:
|
| 76 |
-
personalize_message.set_personalization()
|
| 77 |
-
|
| 78 |
-
if involve_recsys_result:
|
| 79 |
-
personalize_message.set_messaging_mode("recsys_result")
|
| 80 |
-
personalize_message.set_involve_recsys_result(involve_recsys_result)
|
| 81 |
-
|
| 82 |
-
# if messaging_mode != "message":
|
| 83 |
-
# personalize_message.set_messaging_mode(messaging_mode)
|
| 84 |
-
|
| 85 |
-
if recsys_contents:
|
| 86 |
-
personalize_message.set_recsys_contents(recsys_contents)
|
| 87 |
-
|
| 88 |
-
users_df = self._create_personalized_message(CoreConfig=personalize_message, progress_callback=progress_callback)
|
| 89 |
-
|
| 90 |
-
total_prompt_tokens = personalize_message.total_tokens["prompt_tokens"]
|
| 91 |
-
total_completion_tokens = personalize_message.total_tokens["completion_tokens"]
|
| 92 |
-
|
| 93 |
-
total_cost = self.calculate_cost(total_prompt_tokens, total_completion_tokens, model)
|
| 94 |
-
|
| 95 |
-
print(f"Estimated Cost (USD): {total_cost:.5f} ---> Number of messages: {(len(users_df) * number_of_messages)}")
|
| 96 |
-
st.write(f"Estimated Cost (USD): {total_cost:.5f} ---> Number of messages: {(len(users_df) * number_of_messages)}")
|
| 97 |
-
|
| 98 |
-
scale_price = (total_cost * 1000) / (len(users_df) * number_of_messages)
|
| 99 |
-
print(f"Estimated Cost (USD) for 1000 messages: {scale_price}")
|
| 100 |
-
st.write(f"Estimated Cost (USD) for 1000 messages: {scale_price}")
|
| 101 |
-
|
| 102 |
-
return users_df
|
| 103 |
-
|
| 104 |
-
# -----------------------------------------------------
|
| 105 |
-
def calculate_cost(self, total_prompt_tokens, total_completion_tokens, model):
|
| 106 |
-
input_price, output_price = self.get_model_price(model)
|
| 107 |
-
|
| 108 |
-
total_cost = ((total_prompt_tokens / 1000000) * input_price) + (
|
| 109 |
-
(total_completion_tokens / 1000000) * output_price) # Cost calculation estimation
|
| 110 |
-
|
| 111 |
-
return total_cost
|
| 112 |
-
|
| 113 |
-
# ====================================================
|
| 114 |
-
def get_model_price(self, model):
|
| 115 |
-
"""
|
| 116 |
-
getting the input price and output price per 1m token for the requested model
|
| 117 |
-
:param model:
|
| 118 |
-
:return:
|
| 119 |
-
"""
|
| 120 |
-
|
| 121 |
-
input_prices = {
|
| 122 |
-
"gpt-4o-mini":0.15,
|
| 123 |
-
"gpt-4.1-mini":0.4,
|
| 124 |
-
"gpt-5-mini": 0.25,
|
| 125 |
-
"gpt-5-nano": 0.05,
|
| 126 |
-
"gemini-2.5-flash":0.3,
|
| 127 |
-
"gemini-2.0-flash":0.1,
|
| 128 |
-
"gemini-2.5-flash-lite":0.1,
|
| 129 |
-
"claude-3-5-haiku-latest": 0.8,
|
| 130 |
-
"google/gemma-3-27b-instruct/bf-16": 0.15
|
| 131 |
-
}
|
| 132 |
-
|
| 133 |
-
out_prices = {
|
| 134 |
-
"gpt-4o-mini":0.6,
|
| 135 |
-
"gpt-4.1-mini":1.6,
|
| 136 |
-
"gpt-5-mini": 2,
|
| 137 |
-
"gpt-5-nano": 0.4,
|
| 138 |
-
"gemini-2.5-flash":2.5,
|
| 139 |
-
"gemini-2.0-flash":0.7,
|
| 140 |
-
"gemini-2.5-flash-lite":0.4,
|
| 141 |
-
"claude-3-5-haiku-latest": 3,
|
| 142 |
-
"google/gemma-3-27b-instruct/bf-16":0.3
|
| 143 |
-
}
|
| 144 |
-
|
| 145 |
-
i_price = input_prices.get(model, 0)
|
| 146 |
-
o_price= out_prices.get(model, 0)
|
| 147 |
-
|
| 148 |
-
return i_price, o_price
|
| 149 |
-
|
| 150 |
-
# ====================================================
|
| 151 |
-
def identify_users(self, users_df, identifier_column):
|
| 152 |
-
"""
|
| 153 |
-
specifying the users for identification
|
| 154 |
-
:param identifier_column:
|
| 155 |
-
:return: updated users
|
| 156 |
-
"""
|
| 157 |
-
|
| 158 |
-
if identifier_column.upper() == "EMAIL":
|
| 159 |
-
return users_df
|
| 160 |
-
else:
|
| 161 |
-
users_df.rename(columns={identifier_column: "USER_ID"}, inplace=True)
|
| 162 |
-
return users_df
|
| 163 |
-
|
| 164 |
-
# ------------------------------------------------------------------
|
| 165 |
-
def _create_personalized_message(self, CoreConfig, step=1, progress_callback=None):
|
| 166 |
-
"""
|
| 167 |
-
main function of the class to flow the work between functions inorder to create personalized messages.
|
| 168 |
-
:return: updated users_df with extracted information and personalize messages.
|
| 169 |
-
"""
|
| 170 |
-
# Collecting all the data that we need to personalize messages
|
| 171 |
-
datacollect = DataCollector(CoreConfig)
|
| 172 |
-
CoreConfig = datacollect.gather_data()
|
| 173 |
-
|
| 174 |
-
# generating recommendations for users, if we want to include recommendations in the message
|
| 175 |
-
if CoreConfig.involve_recsys_result and CoreConfig.messaging_mode != "message":
|
| 176 |
-
Recommender = LLMR(CoreConfig, random=True)
|
| 177 |
-
CoreConfig = Recommender.get_recommendations(progress_callback)
|
| 178 |
-
|
| 179 |
-
else:
|
| 180 |
-
# We only want to generate the message and redirect them to For You section or Homepage
|
| 181 |
-
Recommender = DefaultRec(CoreConfig)
|
| 182 |
-
CoreConfig = Recommender.get_recommendations()
|
| 183 |
-
|
| 184 |
-
# generating proper prompt for each user
|
| 185 |
-
prompt = PromptGenerator(CoreConfig)
|
| 186 |
-
CoreConfig = prompt.generate_prompts()
|
| 187 |
-
|
| 188 |
-
# generating messages for each user
|
| 189 |
-
message_generator = MessageGenerator(CoreConfig)
|
| 190 |
-
CoreConfig = message_generator.generate_messages(progress_callback)
|
| 191 |
-
|
| 192 |
-
# Eliminating rows where we don't have a valid message (null, empty, or whitespace only)
|
| 193 |
-
CoreConfig.users_df = CoreConfig.users_df[CoreConfig.users_df["message"].str.strip().astype(bool)]
|
| 194 |
-
CoreConfig.checkpoint()
|
| 195 |
-
|
| 196 |
-
# closing snowflake connection
|
| 197 |
-
# CoreConfig.session.close()
|
| 198 |
-
|
| 199 |
-
return CoreConfig.users_df
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Messaging_system/PromptEng.py
DELETED
|
@@ -1,268 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
This is the prompt engineering layer to modifty the prompt for better perfromance
|
| 3 |
-
"""
|
| 4 |
-
import openai
|
| 5 |
-
from fontTools.ttLib.tables.ttProgram import instructions
|
| 6 |
-
from openai import OpenAI
|
| 7 |
-
from Messaging_system.LLM import LLM
|
| 8 |
-
import os
|
| 9 |
-
import streamlit as st
|
| 10 |
-
from google.genai import types
|
| 11 |
-
from google import genai
|
| 12 |
-
|
| 13 |
-
class PromptEngine:
|
| 14 |
-
|
| 15 |
-
def __init__(self, coreconfig):
|
| 16 |
-
self.Core=coreconfig
|
| 17 |
-
self.llm=LLM(self.Core)
|
| 18 |
-
|
| 19 |
-
# ============================================================
|
| 20 |
-
def get_credential(self, key):
|
| 21 |
-
return os.getenv(key) or st.secrets.get(key)
|
| 22 |
-
|
| 23 |
-
# =============================================================
|
| 24 |
-
def prompt_engineering(self, prompt):
|
| 25 |
-
"""
|
| 26 |
-
prompt engineering layer to modify the prompt as needed
|
| 27 |
-
:param prompt:
|
| 28 |
-
:return:
|
| 29 |
-
"""
|
| 30 |
-
|
| 31 |
-
new_prompt = f"""
|
| 32 |
-
|
| 33 |
-
Modify below prompt following best prompt engineering methods. return only the new prompt as a text.
|
| 34 |
-
modify the prompt and instructions in <original_prompt> tag to maximimize better results by providing the new prompt.
|
| 35 |
-
|
| 36 |
-
### Original prompt
|
| 37 |
-
|
| 38 |
-
<original_prompt>
|
| 39 |
-
|
| 40 |
-
{prompt}
|
| 41 |
-
|
| 42 |
-
</original_prompt>
|
| 43 |
-
|
| 44 |
-
output the new prompt as text without any additional information.
|
| 45 |
-
|
| 46 |
-
"""
|
| 47 |
-
|
| 48 |
-
final_prompt = self.get_final_prompt(new_prompt)
|
| 49 |
-
return final_prompt
|
| 50 |
-
# ===========================================================
|
| 51 |
-
def get_final_prompt(self, prompt):
|
| 52 |
-
|
| 53 |
-
if self.Core.model in self.Core.config_file["openai_models"]:
|
| 54 |
-
final_prompt = self.get_openai_response(prompt)
|
| 55 |
-
return final_prompt
|
| 56 |
-
|
| 57 |
-
elif self.Core.model in self.Core.config_file["inference_models"]:
|
| 58 |
-
final_prompt = self.get_inference_response(prompt)
|
| 59 |
-
return final_prompt
|
| 60 |
-
|
| 61 |
-
elif self.Core.model in self.Core.config_file["claude_models"]:
|
| 62 |
-
final_prompt = self.get_claude_response(prompt, self.llm_instructions())
|
| 63 |
-
return final_prompt
|
| 64 |
-
|
| 65 |
-
elif self.Core.model in self.Core.config_file["google_models"]:
|
| 66 |
-
final_prompt = self.get_gemini_response(prompt)
|
| 67 |
-
return final_prompt
|
| 68 |
-
|
| 69 |
-
# ============================================================
|
| 70 |
-
def llm_instructions(self):
|
| 71 |
-
|
| 72 |
-
system_prompt = """
|
| 73 |
-
You are a prompt engineer. Rewrite the following prompt to be clearer, more specific, and likely to produce a better response from an LLM following best prompt engineering techniques and styles.
|
| 74 |
-
"""
|
| 75 |
-
|
| 76 |
-
return system_prompt
|
| 77 |
-
|
| 78 |
-
# =============================================================
|
| 79 |
-
def get_inference_response(self, prompt, max_retries=4):
|
| 80 |
-
api_key = self.get_credential("inference_api_key")
|
| 81 |
-
client = OpenAI(
|
| 82 |
-
base_url="https://api.inference.net/v1",
|
| 83 |
-
api_key=api_key,
|
| 84 |
-
)
|
| 85 |
-
|
| 86 |
-
reasoning = self.Core.reasoning_model
|
| 87 |
-
system_prompt = self.llm_instructions()
|
| 88 |
-
|
| 89 |
-
for attempt in range(max_retries):
|
| 90 |
-
try:
|
| 91 |
-
if reasoning:
|
| 92 |
-
response = client.chat.completions.create(
|
| 93 |
-
model=self.Core.model,
|
| 94 |
-
response_format={"type": "text"},
|
| 95 |
-
messages=[
|
| 96 |
-
{"role": "system", "content": system_prompt},
|
| 97 |
-
{"role": "user", "content": prompt}
|
| 98 |
-
],
|
| 99 |
-
reasoning_effort="medium",
|
| 100 |
-
n=1,
|
| 101 |
-
)
|
| 102 |
-
else:
|
| 103 |
-
response = client.chat.completions.create(
|
| 104 |
-
model=self.Core.model,
|
| 105 |
-
response_format={"type": "text"},
|
| 106 |
-
messages=[
|
| 107 |
-
{"role": "system", "content": system_prompt},
|
| 108 |
-
{"role": "user", "content": prompt}
|
| 109 |
-
],
|
| 110 |
-
n=1,
|
| 111 |
-
temperature=self.Core.temperature
|
| 112 |
-
)
|
| 113 |
-
|
| 114 |
-
tokens = {
|
| 115 |
-
'prompt_tokens': response.usage.prompt_tokens,
|
| 116 |
-
'completion_tokens': response.usage.completion_tokens,
|
| 117 |
-
'total_tokens': response.usage.total_tokens
|
| 118 |
-
}
|
| 119 |
-
|
| 120 |
-
content = response.choices[0].message.content
|
| 121 |
-
output = str(content)
|
| 122 |
-
|
| 123 |
-
# validating the JSON
|
| 124 |
-
self.Core.total_tokens['prompt_tokens'] += tokens['prompt_tokens']
|
| 125 |
-
self.Core.total_tokens['completion_tokens'] += tokens['completion_tokens']
|
| 126 |
-
self.Core.temp_token_counter += tokens['total_tokens']
|
| 127 |
-
return output
|
| 128 |
-
|
| 129 |
-
except openai.APIConnectionError as e:
|
| 130 |
-
print("The server could not be reached")
|
| 131 |
-
print(e.__cause__) # an underlying Exception, likely raised within httpx.
|
| 132 |
-
except openai.RateLimitError as e:
|
| 133 |
-
print("A 429 status code was received; we should back off a bit.")
|
| 134 |
-
except openai.APIStatusError as e:
|
| 135 |
-
print("Another non-200-range status code was received")
|
| 136 |
-
print(e.status_code)
|
| 137 |
-
print(e.response)
|
| 138 |
-
|
| 139 |
-
print("Max retries exceeded. Returning empty response.")
|
| 140 |
-
return prompt # returns original prompt if needed
|
| 141 |
-
|
| 142 |
-
# ===============================================================
|
| 143 |
-
def get_openai_response(self, prompt, max_retries=4):
|
| 144 |
-
"""
|
| 145 |
-
sending the prompt to openai LLM and get back the response
|
| 146 |
-
"""
|
| 147 |
-
|
| 148 |
-
openai.api_key = self.Core.api_key
|
| 149 |
-
client = OpenAI(api_key=self.Core.api_key)
|
| 150 |
-
reasoning = self.Core.reasoning_model
|
| 151 |
-
system_prompt = self.llm_instructions()
|
| 152 |
-
|
| 153 |
-
for attempt in range(max_retries):
|
| 154 |
-
try:
|
| 155 |
-
if reasoning:
|
| 156 |
-
response = client.chat.completions.create(
|
| 157 |
-
model=self.Core.model,
|
| 158 |
-
response_format={"type": "text"},
|
| 159 |
-
messages=[
|
| 160 |
-
{"role": "system", "content": system_prompt},
|
| 161 |
-
{"role": "user", "content": prompt}
|
| 162 |
-
],
|
| 163 |
-
reasoning_effort="medium",
|
| 164 |
-
n=1,
|
| 165 |
-
)
|
| 166 |
-
else:
|
| 167 |
-
response = client.chat.completions.create(
|
| 168 |
-
model=self.Core.model,
|
| 169 |
-
response_format={"type": "text"},
|
| 170 |
-
messages=[
|
| 171 |
-
{"role": "system", "content": system_prompt},
|
| 172 |
-
{"role": "user", "content": prompt}
|
| 173 |
-
],
|
| 174 |
-
n=1,
|
| 175 |
-
temperature=self.Core.temperature
|
| 176 |
-
)
|
| 177 |
-
|
| 178 |
-
tokens = {
|
| 179 |
-
'prompt_tokens': response.usage.prompt_tokens,
|
| 180 |
-
'completion_tokens': response.usage.completion_tokens,
|
| 181 |
-
'total_tokens': response.usage.total_tokens
|
| 182 |
-
}
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
content = response.choices[0].message.content
|
| 186 |
-
output = str(content)
|
| 187 |
-
|
| 188 |
-
# validating the JSON
|
| 189 |
-
self.Core.total_tokens['prompt_tokens'] += tokens['prompt_tokens']
|
| 190 |
-
self.Core.total_tokens['completion_tokens'] += tokens['completion_tokens']
|
| 191 |
-
self.Core.temp_token_counter += tokens['total_tokens']
|
| 192 |
-
return output
|
| 193 |
-
|
| 194 |
-
except openai.APIConnectionError as e:
|
| 195 |
-
print("The server could not be reached")
|
| 196 |
-
print(e.__cause__) # an underlying Exception, likely raised within httpx.
|
| 197 |
-
except openai.RateLimitError as e:
|
| 198 |
-
print("A 429 status code was received; we should back off a bit.")
|
| 199 |
-
except openai.APIStatusError as e:
|
| 200 |
-
print("Another non-200-range status code was received")
|
| 201 |
-
print(e.status_code)
|
| 202 |
-
print(e.response)
|
| 203 |
-
|
| 204 |
-
print("Max retries exceeded. Returning empty response.")
|
| 205 |
-
return prompt # returns original prompt if needed
|
| 206 |
-
|
| 207 |
-
# ==========================================================================
|
| 208 |
-
def get_gemini_response(self, prompt, max_retries=4):
|
| 209 |
-
"""
|
| 210 |
-
Send prompt to Google Gemini LLM and get back the response
|
| 211 |
-
:param prompt:
|
| 212 |
-
:param max_retries:
|
| 213 |
-
:return:
|
| 214 |
-
"""
|
| 215 |
-
|
| 216 |
-
client = genai.Client(api_key=self.get_credential("Google_API"))
|
| 217 |
-
|
| 218 |
-
for attempt in range(max_retries):
|
| 219 |
-
try:
|
| 220 |
-
response = client.models.generate_content(
|
| 221 |
-
model=self.Core.model,
|
| 222 |
-
contents=prompt,
|
| 223 |
-
config=types.GenerateContentConfig(
|
| 224 |
-
thinking_config=types.ThinkingConfig(thinking_budget=0),
|
| 225 |
-
system_instruction=self.llm_instructions(),
|
| 226 |
-
temperature=self.Core.temperature,
|
| 227 |
-
response_mime_type = "text/plain" # application/json
|
| 228 |
-
))
|
| 229 |
-
|
| 230 |
-
output = str(response.text)
|
| 231 |
-
return output
|
| 232 |
-
except Exception as e:
|
| 233 |
-
print(f"Error in attempt {attempt}: {e}")
|
| 234 |
-
|
| 235 |
-
# ==========================================================================
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
def get_claude_response(self, prompt, instructions, max_retries=4):
|
| 239 |
-
"""
|
| 240 |
-
send prompt to claude LLM and get back the response
|
| 241 |
-
:param prompt:
|
| 242 |
-
:param instructions:
|
| 243 |
-
:return:
|
| 244 |
-
"""
|
| 245 |
-
|
| 246 |
-
for attempt in range(max_retries):
|
| 247 |
-
try:
|
| 248 |
-
|
| 249 |
-
message = self.llm.client.messages.create(
|
| 250 |
-
model=self.Core.model,
|
| 251 |
-
max_tokens=4096,
|
| 252 |
-
system = instructions,
|
| 253 |
-
messages=[
|
| 254 |
-
{"role": "user", "content": prompt}
|
| 255 |
-
],
|
| 256 |
-
temperature=self.Core.temperature
|
| 257 |
-
)
|
| 258 |
-
# Try generating the response
|
| 259 |
-
response = message.content[0].text
|
| 260 |
-
return response
|
| 261 |
-
except Exception as e:
|
| 262 |
-
print(f"Error: {e}")
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
print("Max retries exceeded. Returning empty response.")
|
| 266 |
-
return prompt # returns original prompt if needed
|
| 267 |
-
|
| 268 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Messaging_system/PromptGenerator_2.py
DELETED
|
@@ -1,446 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
THis class generate proper prompts for the messaging system
|
| 3 |
-
"""
|
| 4 |
-
import pandas as pd
|
| 5 |
-
from tqdm import tqdm
|
| 6 |
-
from Messaging_system.PromptEng import PromptEngine
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
class PromptGenerator:
|
| 10 |
-
|
| 11 |
-
def __init__(self, Core):
|
| 12 |
-
self.Core = Core
|
| 13 |
-
|
| 14 |
-
# --------------------------------------------------------------
|
| 15 |
-
# --------------------------------------------------------------
|
| 16 |
-
def generate_prompts(self):
|
| 17 |
-
"""
|
| 18 |
-
generates a personalized message for each student
|
| 19 |
-
:return:
|
| 20 |
-
"""
|
| 21 |
-
|
| 22 |
-
# engine = PromptEngine(self.Core)
|
| 23 |
-
|
| 24 |
-
# if we have personalized information about them, we generate a personalized prompt
|
| 25 |
-
for idx, row in tqdm(self.Core.users_df.iterrows(), desc="generating prompts"):
|
| 26 |
-
# check if we have enough information to generate a personalized message
|
| 27 |
-
prompt = self.generate_personalized_prompt(user=row)
|
| 28 |
-
# new_prompt = engine.prompt_engineering(prompt)
|
| 29 |
-
# self.Core.users_df.at[idx, "prompt"] = new_prompt
|
| 30 |
-
self.Core.users_df.at[idx, "prompt"] = prompt
|
| 31 |
-
self.Core.users_df.at[idx, "source"] = "AI-generated"
|
| 32 |
-
|
| 33 |
-
return self.Core
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
# --------------------------------------------------------------
|
| 37 |
-
def safe_get(self, value):
|
| 38 |
-
return str(value) if pd.notna(value) else "Not available"
|
| 39 |
-
|
| 40 |
-
# ==============================================================
|
| 41 |
-
def get_user_profile(self, user):
|
| 42 |
-
|
| 43 |
-
# additional_info = self.user_additional_info(user)
|
| 44 |
-
|
| 45 |
-
user_info = f"""
|
| 46 |
-
### **User Information:**
|
| 47 |
-
|
| 48 |
-
Here is the information about the user:
|
| 49 |
-
- The user is a {str(self.Core.get_instrument())} student.
|
| 50 |
-
- {self.safe_get(self.Core.segment_info)}
|
| 51 |
-
|
| 52 |
-
"""
|
| 53 |
-
|
| 54 |
-
## deleted from profile
|
| 55 |
-
# first name: {self.safe_get(user.get("first_name"))}
|
| 56 |
-
# Weeks since Last interaction:{self.safe_get(user.get("weeks_since_last_interaction"))}
|
| 57 |
-
# {self.safe_get(additional_info)}
|
| 58 |
-
# ** User
|
| 59 |
-
# profile: **
|
| 60 |
-
#
|
| 61 |
-
# {self.safe_get(user.get("user_info"))}
|
| 62 |
-
|
| 63 |
-
return user_info
|
| 64 |
-
|
| 65 |
-
# --------------------------------------------------------------
|
| 66 |
-
def generate_personalized_prompt(self, user):
|
| 67 |
-
"""
|
| 68 |
-
generate a personalized prompt by putting the information from the user into a template prompt
|
| 69 |
-
:return: Personalized prompt (string)
|
| 70 |
-
"""
|
| 71 |
-
input_context = self.input_context()
|
| 72 |
-
cta = self.CTA_instructions()
|
| 73 |
-
|
| 74 |
-
# if (self.Core.involve_recsys_result and self.Core.messaging_mode !="message") or self.Core.target_content is not None:
|
| 75 |
-
# if user["recommendation"] is not None or user["recommendation_info"] is not None:
|
| 76 |
-
# recommendations_instructions = self.recommendations_instructions(user=user) + "\n"
|
| 77 |
-
# else:
|
| 78 |
-
# recommendations_instructions = self.redirect_to_for_you()
|
| 79 |
-
# else:
|
| 80 |
-
# recommendations_instructions = self.redirect_to_for_you()
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
user_info = self.get_user_profile(user=user)
|
| 84 |
-
|
| 85 |
-
# personalize_message_instructions = self.personalize_message_instructions(user)
|
| 86 |
-
# {personalize_message_instructions}
|
| 87 |
-
|
| 88 |
-
general_instructions = self.message_type_instructions()
|
| 89 |
-
|
| 90 |
-
output_instructions = self.output_instruction()
|
| 91 |
-
|
| 92 |
-
# task_instructions = self.task_instructions()
|
| 93 |
-
# eliminate {task_instructions} and {recommendations_instructions}
|
| 94 |
-
|
| 95 |
-
prompt = f"""
|
| 96 |
-
{input_context}
|
| 97 |
-
|
| 98 |
-
{user_info}
|
| 99 |
-
|
| 100 |
-
{cta}
|
| 101 |
-
|
| 102 |
-
{general_instructions}
|
| 103 |
-
|
| 104 |
-
{output_instructions}
|
| 105 |
-
"""
|
| 106 |
-
|
| 107 |
-
return prompt
|
| 108 |
-
|
| 109 |
-
# --------------------------------------------------------------
|
| 110 |
-
# --------------------------------------------------------------
|
| 111 |
-
def input_context(self):
|
| 112 |
-
"""
|
| 113 |
-
:return: input instructions as a string
|
| 114 |
-
"""
|
| 115 |
-
|
| 116 |
-
context = f"""
|
| 117 |
-
Your task is to write a 'header' and a 'message' as a push notification for a {self.Core.get_instrument()} student that sounds like everyday natural speech: friendly, short, no jargon by following the instructions.
|
| 118 |
-
The output should sound like something that a {self.Core.get_instrument()} instructor would realistically say to a student in a daily conversation.
|
| 119 |
-
|
| 120 |
-
"""
|
| 121 |
-
|
| 122 |
-
if self.Core.brand_voice is not None:
|
| 123 |
-
context += f"""
|
| 124 |
-
** Examples of actual phrases an instructor might say:**
|
| 125 |
-
|
| 126 |
-
{self.Core.brand_voice}
|
| 127 |
-
"""
|
| 128 |
-
|
| 129 |
-
return context
|
| 130 |
-
|
| 131 |
-
# --------------------------------------------------------------
|
| 132 |
-
# --------------------------------------------------------------
|
| 133 |
-
def CTA_instructions(self):
|
| 134 |
-
"""
|
| 135 |
-
define CTA instructions
|
| 136 |
-
:return: CTA instructions (str)
|
| 137 |
-
"""
|
| 138 |
-
|
| 139 |
-
instructions = f"""
|
| 140 |
-
|
| 141 |
-
### **Main instructions**
|
| 142 |
-
|
| 143 |
-
{self.Core.CTA} \n
|
| 144 |
-
"""
|
| 145 |
-
|
| 146 |
-
return instructions
|
| 147 |
-
|
| 148 |
-
# --------------------------------------------------------------
|
| 149 |
-
# --------------------------------------------------------------
|
| 150 |
-
def user_additional_info(self, user):
|
| 151 |
-
"""
|
| 152 |
-
providing additional information given in the input data
|
| 153 |
-
:param user:
|
| 154 |
-
:return:
|
| 155 |
-
"""
|
| 156 |
-
|
| 157 |
-
if "additional_info" not in user.index:
|
| 158 |
-
return ""
|
| 159 |
-
|
| 160 |
-
if pd.notna(user["additional_info"]) and user["additional_info"] not in [None, [], {}] and (
|
| 161 |
-
not isinstance(user["additional_info"], str) or user["additional_info"].strip()):
|
| 162 |
-
additional_info = user["additional_info"]
|
| 163 |
-
else:
|
| 164 |
-
additional_info = ""
|
| 165 |
-
|
| 166 |
-
return additional_info
|
| 167 |
-
|
| 168 |
-
# --------------------------------------------------------------
|
| 169 |
-
# --------------------------------------------------------------
|
| 170 |
-
def recommendations_instructions(self, user):
|
| 171 |
-
"""
|
| 172 |
-
instructions about target recommendation for the user
|
| 173 |
-
:param user:
|
| 174 |
-
:return:
|
| 175 |
-
"""
|
| 176 |
-
|
| 177 |
-
instructions_for_recsys = f"""
|
| 178 |
-
### ** Recommendations instructions **:
|
| 179 |
-
Below is the content that we want to recommend to the user:
|
| 180 |
-
|
| 181 |
-
Recommended content: {user["recommendation_info"]}
|
| 182 |
-
|
| 183 |
-
- Use the **CONTENT_TITLE** naturally in the message if capable, but do not use the exact title verbatim or put it in quotes.
|
| 184 |
-
- Naturally mention the **CONTENT_TYPE** for course, workout, and quicktips if capable.
|
| 185 |
-
- If the recommended content has an **Artist** with a known full name, use the ** FULL NAME ** naturally in the message if capable. If only the first name of the Artist is available, ** DO NOT ** use it at all.
|
| 186 |
-
"""
|
| 187 |
-
|
| 188 |
-
# need to adjust
|
| 189 |
-
instructions_for_target_content = """
|
| 190 |
-
- Considering the information about the user, and the content that we want to recommend, include the **TITLE** inside single quotes, or use the title naturally without the exact title name and quotes if capable.
|
| 191 |
-
Naturally mention the **CONTENT_TYPE** for course, workout, quicktips if capable and shortly provide a reasoning why the content is helpful for them.
|
| 192 |
-
|
| 193 |
-
**Target recommended Content**:
|
| 194 |
-
"""
|
| 195 |
-
|
| 196 |
-
instructions = ""
|
| 197 |
-
|
| 198 |
-
if self.Core.involve_recsys_result:
|
| 199 |
-
instructions += f"""
|
| 200 |
-
{instructions_for_recsys}
|
| 201 |
-
"""
|
| 202 |
-
|
| 203 |
-
elif self.Core.target_content is not None:
|
| 204 |
-
# fetching the information related to the target content from content_table
|
| 205 |
-
target_info = self.get_target_content_info(user)
|
| 206 |
-
instructions += f"""
|
| 207 |
-
{instructions_for_target_content}
|
| 208 |
-
{target_info}
|
| 209 |
-
"""
|
| 210 |
-
|
| 211 |
-
return instructions
|
| 212 |
-
|
| 213 |
-
# --------------------------------------------------------------
|
| 214 |
-
# --------------------------------------------------------------
|
| 215 |
-
def get_target_content_info(self, user):
|
| 216 |
-
"""
|
| 217 |
-
fetching information about the target content that we want to recommend to the user
|
| 218 |
-
:param user: target user
|
| 219 |
-
:return:
|
| 220 |
-
"""
|
| 221 |
-
|
| 222 |
-
# checking that user[self.target_content] contains a content_id:
|
| 223 |
-
target_id = int(user[self.Core.target_content])
|
| 224 |
-
|
| 225 |
-
try:
|
| 226 |
-
|
| 227 |
-
# fetching the data for target content (self.target_content column in user)
|
| 228 |
-
content_info_row = self.Core.content_info.loc[self.Core.content_info['content_id'] == target_id]
|
| 229 |
-
|
| 230 |
-
text = f"""
|
| 231 |
-
**content_id** : {str(content_info_row["content_id"])}"
|
| 232 |
-
**content_info** : \n {content_info_row["content_info"]} \n\n"
|
| 233 |
-
"""
|
| 234 |
-
return text
|
| 235 |
-
except:
|
| 236 |
-
print(f"Target content cannot be found in the content database: content_id = {target_id}")
|
| 237 |
-
|
| 238 |
-
# --------------------------------------------------------------
|
| 239 |
-
# --------------------------------------------------------------
|
| 240 |
-
def personalize_message_instructions(self, user):
|
| 241 |
-
"""
|
| 242 |
-
:return: personalized message instructions as a string
|
| 243 |
-
"""
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
instructions = """
|
| 247 |
-
### ** Personalized Message Specifications **
|
| 248 |
-
|
| 249 |
-
"""
|
| 250 |
-
|
| 251 |
-
# # Name
|
| 252 |
-
# if "first_name" in self.Core.list_of_features and pd.notna(user["first_name"]) and user["first_name"] not in [
|
| 253 |
-
# None,
|
| 254 |
-
# [],
|
| 255 |
-
# {}] and (
|
| 256 |
-
# not isinstance(user["first_name"], str) or user["first_name"].strip()):
|
| 257 |
-
# instructions += f"""
|
| 258 |
-
# - Address the user by their first name in 'header' casually (only first letter capital) to make the message more personal. \n
|
| 259 |
-
# """
|
| 260 |
-
# else:
|
| 261 |
-
# instructions += """
|
| 262 |
-
# - If the user's name is not available or invalid (e.g. email), proceed without addressing them by name. \n
|
| 263 |
-
# """
|
| 264 |
-
|
| 265 |
-
# Birthday reminder
|
| 266 |
-
if "birthday_reminder" in self.Core.list_of_features and pd.notna(user["birthday_reminder"]) and user[
|
| 267 |
-
"birthday_reminder"] not in [None, [], {}] and (
|
| 268 |
-
not isinstance(user["birthday_reminder"], str) or user["birthday_reminder"].strip()):
|
| 269 |
-
instructions += """
|
| 270 |
-
- **Include a short message to remind them that their birthday is coming up.** \n
|
| 271 |
-
|
| 272 |
-
"""
|
| 273 |
-
|
| 274 |
-
# Additional instructions for input columns
|
| 275 |
-
if self.Core.additional_instructions is not None or str(self.Core.additional_instructions).strip() != '':
|
| 276 |
-
instructions += str(self.Core.additional_instructions)
|
| 277 |
-
|
| 278 |
-
# instructions += self.fire_wall() + "\n"
|
| 279 |
-
|
| 280 |
-
final_instructions = f"""
|
| 281 |
-
{general_instructions}
|
| 282 |
-
|
| 283 |
-
{instructions}
|
| 284 |
-
|
| 285 |
-
"""
|
| 286 |
-
|
| 287 |
-
return final_instructions
|
| 288 |
-
|
| 289 |
-
# --------------------------------------------------------------
|
| 290 |
-
# --------------------------------------------------------------
|
| 291 |
-
|
| 292 |
-
def message_type_instructions(self):
|
| 293 |
-
"""
|
| 294 |
-
create a proper instruction for the message type, regarding the input platform
|
| 295 |
-
:return: message instructions as a string
|
| 296 |
-
"""
|
| 297 |
-
|
| 298 |
-
instructions = ""
|
| 299 |
-
message_style = self.message_style_instructions()
|
| 300 |
-
|
| 301 |
-
if self.Core.platform == "push":
|
| 302 |
-
instructions = f"""
|
| 303 |
-
### ** General Specifications: **
|
| 304 |
-
|
| 305 |
-
- Start directly with the message content without greetings or closing phrases.
|
| 306 |
-
- Avoid using same or similar words so close together in "message" and "header", and make sure there is no grammar problem.
|
| 307 |
-
- The message, vocabulary and sentences **MUST** sound like a natural conversation: something that people normally say in daily conversations.
|
| 308 |
-
- {message_style}
|
| 309 |
-
|
| 310 |
-
"""
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
elif self.Core.platform == "app":
|
| 314 |
-
instructions = f"""
|
| 315 |
-
Message Specifications:
|
| 316 |
-
- The message is an **in app notification**.
|
| 317 |
-
- ** Keep the First sentence as "header" that should be a short personalized eye catching sentence less than 40 character **.
|
| 318 |
-
- ** For the "header", don't use exclamation mark at the end, instead, use a space following with a proper emoji at the end of the "header" (e.g. Great work John 😍) **
|
| 319 |
-
- **Keep the message concise and straightforward**.
|
| 320 |
-
- **Start directly with the message content**; do not include greetings (e.g., "Hello") or closing phrases.
|
| 321 |
-
- Make the message highly **personalized** and **eye-catching**.
|
| 322 |
-
- "Personalized" means the user should feel the message is specifically crafted for them and not generic.
|
| 323 |
-
- **Every word should contribute to maximizing impact and engagement**.
|
| 324 |
-
- {message_style}
|
| 325 |
-
"""
|
| 326 |
-
|
| 327 |
-
return instructions
|
| 328 |
-
|
| 329 |
-
# --------------------------------------------------------------
|
| 330 |
-
# --------------------------------------------------------------
|
| 331 |
-
def message_style_instructions(self):
|
| 332 |
-
"""
|
| 333 |
-
defines the style of the message: e.g. friendly, kind, tone, etc.
|
| 334 |
-
:return: style_instructions(str)
|
| 335 |
-
"""
|
| 336 |
-
|
| 337 |
-
if self.Core.message_style is None:
|
| 338 |
-
message_style = ""
|
| 339 |
-
|
| 340 |
-
else:
|
| 341 |
-
message_style = f"""
|
| 342 |
-
- {self.Core.message_style}.
|
| 343 |
-
"""
|
| 344 |
-
|
| 345 |
-
return message_style
|
| 346 |
-
|
| 347 |
-
# --------------------------------------------------------------
|
| 348 |
-
# --------------------------------------------------------------
|
| 349 |
-
def output_instruction(self):
|
| 350 |
-
"""
|
| 351 |
-
:return: output instructions as a string
|
| 352 |
-
"""
|
| 353 |
-
|
| 354 |
-
example_output = self.example_output()
|
| 355 |
-
general_instructions = f"""
|
| 356 |
-
- The "header" must be less than 30 character.
|
| 357 |
-
- The "message" must be less than 100 character.
|
| 358 |
-
- Preserve special characters and emojis in the message, you are **ONLY ALLOWED**allowed to use {self.Core.get_emoji()} emoji if needed, ONLY ONCE, and ONLY at the end of header or message).
|
| 359 |
-
- Ensure that the output is a valid JSON and not include any text outside the JSON code block.
|
| 360 |
-
"""
|
| 361 |
-
|
| 362 |
-
instructions = f"""
|
| 363 |
-
Expected output structure:
|
| 364 |
-
|
| 365 |
-
{{
|
| 366 |
-
"header": "Generated title",
|
| 367 |
-
"message": "Generated message",
|
| 368 |
-
}}
|
| 369 |
-
|
| 370 |
-
{general_instructions}
|
| 371 |
-
"""
|
| 372 |
-
|
| 373 |
-
output_instructions = f"""
|
| 374 |
-
### **Output instructions**:
|
| 375 |
-
|
| 376 |
-
{example_output}
|
| 377 |
-
{instructions}
|
| 378 |
-
"""
|
| 379 |
-
|
| 380 |
-
return output_instructions
|
| 381 |
-
|
| 382 |
-
# --------------------------------------------------------------
|
| 383 |
-
# --------------------------------------------------------------
|
| 384 |
-
def example_output(self):
|
| 385 |
-
"""
|
| 386 |
-
returns an example output (1-shot) to guide the LLM
|
| 387 |
-
:return: example output
|
| 388 |
-
"""
|
| 389 |
-
|
| 390 |
-
if self.Core.sample_example is None:
|
| 391 |
-
|
| 392 |
-
return ""
|
| 393 |
-
|
| 394 |
-
else:
|
| 395 |
-
# one shot prompting
|
| 396 |
-
example = f"""
|
| 397 |
-
Below are some acceptable examples. Create a header and message that follows the same style, tone, vocabulary, and characteristics of followed examples.
|
| 398 |
-
|
| 399 |
-
### **Good Examples:**
|
| 400 |
-
{self.Core.sample_example}
|
| 401 |
-
"""
|
| 402 |
-
|
| 403 |
-
return example
|
| 404 |
-
|
| 405 |
-
# --------------------------------------------------------------
|
| 406 |
-
# --------------------------------------------------------------
|
| 407 |
-
|
| 408 |
-
def task_instructions(self):
|
| 409 |
-
"""
|
| 410 |
-
creating instructions for specifying the tasks
|
| 411 |
-
:return:
|
| 412 |
-
"""
|
| 413 |
-
|
| 414 |
-
if self.Core.involve_recsys_result and self.Core.messaging_mode != "message":
|
| 415 |
-
recsys_task = """
|
| 416 |
-
- Create a perfect message and the header following the instructions, using the user's information and the content that we want to recommend.
|
| 417 |
-
- Use the instructions to include the recommended content in the message.
|
| 418 |
-
|
| 419 |
-
"""
|
| 420 |
-
else:
|
| 421 |
-
recsys_task = ""
|
| 422 |
-
|
| 423 |
-
message_task = """
|
| 424 |
-
- Create a header and a message considering the information and instructions mentioned. Your output format should be based on **Output instructions**."""
|
| 425 |
-
|
| 426 |
-
instructions = f"""
|
| 427 |
-
### Tasks:
|
| 428 |
-
{recsys_task}
|
| 429 |
-
{message_task}
|
| 430 |
-
"""
|
| 431 |
-
|
| 432 |
-
return instructions
|
| 433 |
-
|
| 434 |
-
# =======================================================
|
| 435 |
-
def redirect_to_for_you(self):
|
| 436 |
-
"""
|
| 437 |
-
instructions to redirect the user to For you section or homepage
|
| 438 |
-
:return:
|
| 439 |
-
"""
|
| 440 |
-
|
| 441 |
-
instructions = f"""
|
| 442 |
-
** Note: **
|
| 443 |
-
We don't recommend a specific content and by opening the message, the user will be redirected to a page that contains personalized recommendations for them.
|
| 444 |
-
\n
|
| 445 |
-
"""
|
| 446 |
-
return instructions
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Messaging_system/SnowFlakeConnection.py
DELETED
|
@@ -1,262 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
This class create a connection to Snowflake, run queries (read and write)
|
| 3 |
-
"""
|
| 4 |
-
import json
|
| 5 |
-
|
| 6 |
-
import numpy as np
|
| 7 |
-
import pandas as pd
|
| 8 |
-
from snowflake.snowpark import Session
|
| 9 |
-
from sympy.strategies.branch import condition
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
class SnowFlakeConn:
|
| 13 |
-
def __init__(self, session, brand):
|
| 14 |
-
self. session = session
|
| 15 |
-
self.brand = brand
|
| 16 |
-
|
| 17 |
-
self.final_columns = ['user_id', "email", "user_info", "permission", "expiration_date", "recsys_result", "message", "brand", "recommendation", "segment_name", "timestamp"]
|
| 18 |
-
|
| 19 |
-
self.campaign_id = {
|
| 20 |
-
"singeo": 460,
|
| 21 |
-
"pianote": 457,
|
| 22 |
-
"guitareo": 458,
|
| 23 |
-
"drumeo": 392
|
| 24 |
-
}
|
| 25 |
-
|
| 26 |
-
# ---------------------------------------------------------------
|
| 27 |
-
# ---------------------------------------------------------------
|
| 28 |
-
def run_read_query(self, query, data):
|
| 29 |
-
"""
|
| 30 |
-
Executes a SQL query on Snowflake that fetch the data
|
| 31 |
-
:return: Pandas dataframe containing the query results
|
| 32 |
-
"""
|
| 33 |
-
|
| 34 |
-
# Connect to Snowflake
|
| 35 |
-
try:
|
| 36 |
-
dataframe = self.session.sql(query).to_pandas()
|
| 37 |
-
dataframe.columns = dataframe.columns.str.lower()
|
| 38 |
-
print(f"reading {data} table successfully")
|
| 39 |
-
return dataframe
|
| 40 |
-
except Exception as e:
|
| 41 |
-
print(f"Error in creating/updating table: {e}")
|
| 42 |
-
|
| 43 |
-
# ---------------------------------------------------------------
|
| 44 |
-
# ---------------------------------------------------------------
|
| 45 |
-
def is_json_parsed_to_collection(self, s):
|
| 46 |
-
try:
|
| 47 |
-
parsed = json.loads(s)
|
| 48 |
-
return isinstance(parsed, (dict, list))
|
| 49 |
-
except:
|
| 50 |
-
return False
|
| 51 |
-
# ---------------------------------------------------------------
|
| 52 |
-
# ---------------------------------------------------------------
|
| 53 |
-
def store_df_to_snowflake(self, table_name, dataframe, database="ONLINE_RECSYS", schema="GENERATED_DATA"):
|
| 54 |
-
"""
|
| 55 |
-
Executes a SQL query on Snowflake that write the preprocessed data on new tables
|
| 56 |
-
:param query: SQL query string to be executed
|
| 57 |
-
:return: None
|
| 58 |
-
"""
|
| 59 |
-
|
| 60 |
-
try:
|
| 61 |
-
self.session.use_database(database)
|
| 62 |
-
self.session.use_schema(schema)
|
| 63 |
-
|
| 64 |
-
dataframe = dataframe.reset_index(drop=True)
|
| 65 |
-
dataframe.columns = dataframe.columns.str.upper()
|
| 66 |
-
|
| 67 |
-
self.session.write_pandas(df=dataframe,
|
| 68 |
-
table_name=table_name.strip().upper(),
|
| 69 |
-
auto_create_table=True,
|
| 70 |
-
overwrite=True,
|
| 71 |
-
use_logical_type=True)
|
| 72 |
-
print(f"Data inserted into {table_name} successfully.")
|
| 73 |
-
|
| 74 |
-
except Exception as e:
|
| 75 |
-
print(f"Error in creating/updating/inserting table: {e}")
|
| 76 |
-
|
| 77 |
-
# ---------------------------------------------------------------
|
| 78 |
-
# ---------------------------------------------------------------
|
| 79 |
-
def get_data(self, data, list_of_ids=None):
|
| 80 |
-
"""
|
| 81 |
-
valid Data is = {users, contents, interactions, recsys, popular_contents}
|
| 82 |
-
:param data:
|
| 83 |
-
:return:
|
| 84 |
-
"""
|
| 85 |
-
valid_data = {'users', 'contents', 'interactions', 'recsys', 'popular_contents'}
|
| 86 |
-
|
| 87 |
-
if data not in valid_data:
|
| 88 |
-
raise ValueError(f"Invalid data type: {data}")
|
| 89 |
-
|
| 90 |
-
# Construct the method name based on the input
|
| 91 |
-
method_name = f"_get_{data}"
|
| 92 |
-
|
| 93 |
-
# Retrieve the method dynamically
|
| 94 |
-
method = getattr(self, method_name, None)
|
| 95 |
-
if method is None:
|
| 96 |
-
raise NotImplementedError(f"The method {method_name} is not implemented.")
|
| 97 |
-
|
| 98 |
-
query = method(list_of_ids)
|
| 99 |
-
data = self.run_read_query(query, data)
|
| 100 |
-
|
| 101 |
-
return data
|
| 102 |
-
# ---------------------------------------------------------------
|
| 103 |
-
# ---------------------------------------------------------------
|
| 104 |
-
def _get_contents(self, list_of_ids=None):
|
| 105 |
-
query = f"""
|
| 106 |
-
select CONTENT_ID, CONTENT_TYPE, CONTENT_PROFILE as content_info --, CONTENT_PROFILE_VECTOR
|
| 107 |
-
from ONLINE_RECSYS.VECTOR_DB.VECTORIZED_CONTENT
|
| 108 |
-
where BRAND = '{self.brand}'
|
| 109 |
-
"""
|
| 110 |
-
return query
|
| 111 |
-
# ---------------------------------------------------------------
|
| 112 |
-
# ---------------------------------------------------------------
|
| 113 |
-
def _get_users(self, list_of_ids=None):
|
| 114 |
-
|
| 115 |
-
if list_of_ids is not None:
|
| 116 |
-
ids_str = "(" + ", ".join(map(str, list_of_ids)) + ")"
|
| 117 |
-
condition = f"AND USER_ID in {ids_str}"
|
| 118 |
-
else :
|
| 119 |
-
condition = ""
|
| 120 |
-
|
| 121 |
-
query = f"""
|
| 122 |
-
select USER_ID, BRAND, FIRST_NAME, BIRTHDAY, TIMEZONE, EMAIL, CURRENT_TIMESTAMP() AS TIMESTAMP, DIFFICULTY, SELF_REPORT_DIFFICULTY, USER_PROFILE as user_info, PERMISSION, EXPIRATION_DATE,
|
| 123 |
-
DATEDIFF(
|
| 124 |
-
day,
|
| 125 |
-
CURRENT_DATE(),
|
| 126 |
-
CASE
|
| 127 |
-
WHEN DATE_FROM_PARTS(YEAR(CURRENT_DATE()), EXTRACT(MONTH FROM BIRTHDAY), EXTRACT(DAY FROM BIRTHDAY)) < CURRENT_DATE()
|
| 128 |
-
THEN DATE_FROM_PARTS(YEAR(CURRENT_DATE()) + 1, EXTRACT(MONTH FROM BIRTHDAY), EXTRACT(DAY FROM BIRTHDAY))
|
| 129 |
-
ELSE DATE_FROM_PARTS(YEAR(CURRENT_DATE()), EXTRACT(MONTH FROM BIRTHDAY), EXTRACT(DAY FROM BIRTHDAY))
|
| 130 |
-
END) AS birthday_reminder
|
| 131 |
-
from ONLINE_RECSYS.PREPROCESSED.USERS
|
| 132 |
-
where BRAND = '{self.brand}' {condition}
|
| 133 |
-
"""
|
| 134 |
-
return query
|
| 135 |
-
# ---------------------------------------------------------------
|
| 136 |
-
# ---------------------------------------------------------------
|
| 137 |
-
def _get_interactions(self, list_of_ids=None):
|
| 138 |
-
|
| 139 |
-
if list_of_ids is not None:
|
| 140 |
-
ids_str = "(" + ", ".join(map(str, list_of_ids)) + ")"
|
| 141 |
-
condition = f"AND USER_ID in {ids_str}"
|
| 142 |
-
else :
|
| 143 |
-
condition = ""
|
| 144 |
-
|
| 145 |
-
query = f"""
|
| 146 |
-
WITH latest_interactions AS(
|
| 147 |
-
SELECT
|
| 148 |
-
USER_ID, CONTENT_ID, CONTENT_TYPE, EVENT_TEXT, TIMESTAMP,
|
| 149 |
-
ROW_NUMBER() OVER(PARTITION BY USER_ID ORDER BY TIMESTAMP DESC) AS rn
|
| 150 |
-
FROM ONLINE_RECSYS.PREPROCESSED.RECSYS_INTEACTIONS
|
| 151 |
-
WHERE BRAND = '{self.brand}' AND EVENT_TEXT IN('Video Completed', 'Video Playing') {condition})
|
| 152 |
-
|
| 153 |
-
SELECT i.USER_ID, i.CONTENT_ID, i.CONTENT_TYPE, c.content_profile as last_completed_content, i.EVENT_TEXT, i.TIMESTAMP, DATEDIFF('week', i.TIMESTAMP, CURRENT_TIMESTAMP) AS weeks_since_last_interaction
|
| 154 |
-
FROM latest_interactions i
|
| 155 |
-
LEFT JOIN
|
| 156 |
-
ONLINE_RECSYS.VECTOR_DB.VECTORIZED_CONTENT c ON c.CONTENT_ID = i.CONTENT_ID
|
| 157 |
-
WHERE rn = 1;
|
| 158 |
-
"""
|
| 159 |
-
return query
|
| 160 |
-
# ---------------------------------------------------------------
|
| 161 |
-
# ---------------------------------------------------------------
|
| 162 |
-
def _get_recsys(self, list_of_ids=None):
|
| 163 |
-
|
| 164 |
-
if list_of_ids is not None:
|
| 165 |
-
ids_str = "(" + ", ".join(map(str, list_of_ids)) + ")"
|
| 166 |
-
condition = f"WHERE USER_ID in {ids_str}"
|
| 167 |
-
else :
|
| 168 |
-
condition = ""
|
| 169 |
-
|
| 170 |
-
recsys_col = f"{self.brand}_recsys_v3"
|
| 171 |
-
query = f"""
|
| 172 |
-
select USER_ID, {recsys_col} as recsys_result
|
| 173 |
-
from RECSYS_V3.RECSYS_CIO.RECSYS_V3_CUSTOMER_IO_OLD
|
| 174 |
-
{condition}
|
| 175 |
-
"""
|
| 176 |
-
return query
|
| 177 |
-
# ---------------------------------------------------------------
|
| 178 |
-
# ---------------------------------------------------------------
|
| 179 |
-
def _get_popular_contents(self, list_of_ids=None):
|
| 180 |
-
|
| 181 |
-
query = f"""
|
| 182 |
-
select POPULAR_CONTENT
|
| 183 |
-
from RECSYS_V3.RECSYS_CIO.POPULAR_CONTENT_CUSTOMER_IO_OLD
|
| 184 |
-
where brand = '{self.brand.lower()}'
|
| 185 |
-
"""
|
| 186 |
-
|
| 187 |
-
return query
|
| 188 |
-
# ---------------------------------------------------------------
|
| 189 |
-
# ---------------------------------------------------------------
|
| 190 |
-
def extract_id_from_email(self, emails):
|
| 191 |
-
"""
|
| 192 |
-
extracting user_ids from emails
|
| 193 |
-
:param unique_emails:
|
| 194 |
-
:return:
|
| 195 |
-
"""
|
| 196 |
-
|
| 197 |
-
email_list_str = ', '.join(f"'{email}'" for email in emails)
|
| 198 |
-
query = f"""
|
| 199 |
-
SELECT id as USER_ID, email as EMAIL
|
| 200 |
-
FROM STITCH.MUSORA_ECOM_DB.USORA_USERS
|
| 201 |
-
WHERE email IN ({email_list_str})
|
| 202 |
-
"""
|
| 203 |
-
|
| 204 |
-
user_ids_df = self.run_read_query(query, data="User_ids")
|
| 205 |
-
return user_ids_df
|
| 206 |
-
# ---------------------------------------------------------------
|
| 207 |
-
# ---------------------------------------------------------------
|
| 208 |
-
|
| 209 |
-
def adjust_dataframe(self, dataframe):
|
| 210 |
-
"""
|
| 211 |
-
Filter dataframe to only include the columns in self.final_columns.
|
| 212 |
-
Add any missing columns with None values.
|
| 213 |
-
Ensure the final order is consistent with self.final_columns.
|
| 214 |
-
"""
|
| 215 |
-
# Work with a copy so that we don't modify the original input
|
| 216 |
-
final_df = dataframe.copy()
|
| 217 |
-
|
| 218 |
-
# Normalize column names to lower-case for matching (if needed)
|
| 219 |
-
final_df.columns = final_df.columns.str.lower()
|
| 220 |
-
expected_cols = [col.lower() for col in self.final_columns]
|
| 221 |
-
|
| 222 |
-
# Keep only those columns in the expected list
|
| 223 |
-
available = [col for col in final_df.columns if col in expected_cols]
|
| 224 |
-
final_df = final_df[available]
|
| 225 |
-
|
| 226 |
-
# Add missing columns with None values
|
| 227 |
-
for col in expected_cols:
|
| 228 |
-
if col not in final_df.columns:
|
| 229 |
-
final_df[col] = None
|
| 230 |
-
|
| 231 |
-
# Reorder the columns to the desired order
|
| 232 |
-
final_df = final_df[expected_cols]
|
| 233 |
-
|
| 234 |
-
# If you need the column names to match exactly what self.final_columns provides (case-sensitive),
|
| 235 |
-
# you can rename them accordingly.
|
| 236 |
-
rename_mapping = {col.lower(): col for col in self.final_columns}
|
| 237 |
-
final_df.rename(columns=rename_mapping, inplace=True)
|
| 238 |
-
|
| 239 |
-
return final_df
|
| 240 |
-
# ==============================================================
|
| 241 |
-
def get_users_in_campaign(self, brand):
|
| 242 |
-
"""
|
| 243 |
-
creating a query to fetch requested users
|
| 244 |
-
:param brand:
|
| 245 |
-
:return:
|
| 246 |
-
"""
|
| 247 |
-
|
| 248 |
-
camp_id = self.campaign_id[brand]
|
| 249 |
-
|
| 250 |
-
query = f"""
|
| 251 |
-
SELECT email
|
| 252 |
-
FROM CUSTOMER_IO_DATA_SYNCING.ANALYTICS.vw_user_campaign_delivery_channel
|
| 253 |
-
where campaign_id = {str(camp_id)} AND email is not NULL AND TRIM(email) <> ''
|
| 254 |
-
"""
|
| 255 |
-
|
| 256 |
-
users_df = self.run_read_query(query, data=f"{brand}_campaign")
|
| 257 |
-
return users_df
|
| 258 |
-
|
| 259 |
-
# ---------------------------------------------------------------
|
| 260 |
-
# ---------------------------------------------------------------
|
| 261 |
-
def close_connection(self):
|
| 262 |
-
self.session.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Messaging_system/context_validator.py
DELETED
|
@@ -1,302 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import time
|
| 3 |
-
import openai
|
| 4 |
-
from openai import OpenAI
|
| 5 |
-
from tqdm import tqdm
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
class Validator:
|
| 10 |
-
"""
|
| 11 |
-
LLM-based personalized message generator:
|
| 12 |
-
"""
|
| 13 |
-
|
| 14 |
-
def __init__(self, api_key):
|
| 15 |
-
|
| 16 |
-
# will be set by the user
|
| 17 |
-
self.validator_instructions = None
|
| 18 |
-
self.api_key = api_key
|
| 19 |
-
self.model = "gpt-4o-mini"
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
# to trace the number of tokens and estimate the cost if needed
|
| 23 |
-
self.temp_token_counter = 0
|
| 24 |
-
self.total_tokens = {
|
| 25 |
-
'prompt_tokens': 0,
|
| 26 |
-
'completion_tokens': 0,
|
| 27 |
-
}
|
| 28 |
-
|
| 29 |
-
# -------------------------------------------------------------------
|
| 30 |
-
def set_openai_api(self, openai_key):
|
| 31 |
-
"""
|
| 32 |
-
Setting template with placeholders manually connection
|
| 33 |
-
:param template: a string with placeholders
|
| 34 |
-
:return:
|
| 35 |
-
"""
|
| 36 |
-
self.api_key = openai_key
|
| 37 |
-
|
| 38 |
-
# -------------------------------------------------------------------
|
| 39 |
-
def context_prompt(self):
|
| 40 |
-
|
| 41 |
-
instructions = """
|
| 42 |
-
You are a text moderator and you should parse the input text. based on below instructions. you should decide if
|
| 43 |
-
the input text is a valid input or not.
|
| 44 |
-
"""
|
| 45 |
-
return instructions
|
| 46 |
-
|
| 47 |
-
# -------------------------------------------------------------------
|
| 48 |
-
def initial_prompt(self):
|
| 49 |
-
|
| 50 |
-
instructions = """You are a helpful assistant at Musora, an online music education platform that helps users
|
| 51 |
-
learn music. Our students will provide user-generated-context such as comments and forums on engaging musical
|
| 52 |
-
contents like songs, lessons, workouts or other type of musical and educational content. Your task is
|
| 53 |
-
to determine if the input text provided by our student is a valid text or not.
|
| 54 |
-
|
| 55 |
-
"""
|
| 56 |
-
return instructions
|
| 57 |
-
|
| 58 |
-
# -------------------------------------------------------------------
|
| 59 |
-
def set_validator_instructions(self, valid_instructions="", invalid_instructions=""):
|
| 60 |
-
|
| 61 |
-
instructions = f"""
|
| 62 |
-
** The text is INValid if it falls into any of the below criteria **:
|
| 63 |
-
|
| 64 |
-
{invalid_instructions}
|
| 65 |
-
{self.fire_wall()}
|
| 66 |
-
--------------------------
|
| 67 |
-
|
| 68 |
-
Please ensure that the text meets the following criteria to be considered **valid**:
|
| 69 |
-
|
| 70 |
-
{valid_instructions}
|
| 71 |
-
{self.default_valid_text()}
|
| 72 |
-
"""
|
| 73 |
-
|
| 74 |
-
self.validator_instructions = instructions
|
| 75 |
-
|
| 76 |
-
# -------------------------------------------------------------------
|
| 77 |
-
def output_instruction(self):
|
| 78 |
-
"""
|
| 79 |
-
:return: output instructions as a string
|
| 80 |
-
"""
|
| 81 |
-
|
| 82 |
-
output_instructions = """
|
| 83 |
-
** Task: **
|
| 84 |
-
- **Based on the input text, the music educational nature of our contents, and instructions about validating the student's input, check if the text is a valid input or not.**
|
| 85 |
-
- **Your output should be strictly "True" if it is a Valid text, or "False" if it not a valid text.**
|
| 86 |
-
- **You should provide the output in JSON format where the key is "valid"** - **Do not include any text outside the JSON code block**.
|
| 87 |
-
|
| 88 |
-
Your response should be in JSON format with the following structure:
|
| 89 |
-
|
| 90 |
-
example of a VALID text:
|
| 91 |
-
|
| 92 |
-
{
|
| 93 |
-
"valid": "True",
|
| 94 |
-
}
|
| 95 |
-
|
| 96 |
-
Example of an INVALID text:
|
| 97 |
-
|
| 98 |
-
{
|
| 99 |
-
"valid": "False",
|
| 100 |
-
}
|
| 101 |
-
"""
|
| 102 |
-
return output_instructions
|
| 103 |
-
|
| 104 |
-
# -------------------------------------------------------------------
|
| 105 |
-
def get_llm_response(self, prompt, max_retries=3):
|
| 106 |
-
"""
|
| 107 |
-
sending the prompt to the LLM and get back the response
|
| 108 |
-
"""
|
| 109 |
-
|
| 110 |
-
openai.api_key = self.api_key
|
| 111 |
-
instructions = self.context_prompt()
|
| 112 |
-
client = OpenAI(api_key=self.api_key)
|
| 113 |
-
|
| 114 |
-
for attempt in range(max_retries):
|
| 115 |
-
try:
|
| 116 |
-
response = client.chat.completions.create(
|
| 117 |
-
model=self.model,
|
| 118 |
-
response_format={"type": "json_object"},
|
| 119 |
-
messages=[
|
| 120 |
-
{"role": "system", "content": instructions},
|
| 121 |
-
{"role": "user", "content": prompt}
|
| 122 |
-
],
|
| 123 |
-
max_tokens=500,
|
| 124 |
-
n=1,
|
| 125 |
-
temperature=0.7
|
| 126 |
-
)
|
| 127 |
-
|
| 128 |
-
tokens = {
|
| 129 |
-
'prompt_tokens': response.usage.prompt_tokens,
|
| 130 |
-
'completion_tokens': response.usage.completion_tokens,
|
| 131 |
-
'total_tokens': response.usage.total_tokens
|
| 132 |
-
}
|
| 133 |
-
|
| 134 |
-
try:
|
| 135 |
-
content = response.choices[0].message.content
|
| 136 |
-
# Extract JSON code block
|
| 137 |
-
output = json.loads(content)
|
| 138 |
-
|
| 139 |
-
if 'valid' not in output:
|
| 140 |
-
print(f"'valid' key is missing in response on attempt {attempt + 1}. Retrying...")
|
| 141 |
-
continue # Continue to next attempt
|
| 142 |
-
|
| 143 |
-
else:
|
| 144 |
-
if output["valid"] not in ["True", "False"]:
|
| 145 |
-
print(f"True or False value missing in response on attempt {attempt + 1}. Retrying...")
|
| 146 |
-
continue
|
| 147 |
-
|
| 148 |
-
# validating the JSON
|
| 149 |
-
self.total_tokens['prompt_tokens'] += tokens['prompt_tokens']
|
| 150 |
-
self.total_tokens['completion_tokens'] += tokens['completion_tokens']
|
| 151 |
-
self.temp_token_counter += tokens['prompt_tokens'] + tokens['completion_tokens']
|
| 152 |
-
return output
|
| 153 |
-
|
| 154 |
-
except json.JSONDecodeError:
|
| 155 |
-
print(f"Invalid JSON from LLM on attempt {attempt + 1}. Retrying...")
|
| 156 |
-
|
| 157 |
-
except openai.APIConnectionError as e:
|
| 158 |
-
print("The server could not be reached")
|
| 159 |
-
print(e.__cause__) # an underlying Exception, likely raised within httpx.
|
| 160 |
-
except openai.RateLimitError as e:
|
| 161 |
-
print("A 429 status code was received; we should back off a bit.")
|
| 162 |
-
except openai.APIStatusError as e:
|
| 163 |
-
print("Another non-200-range status code was received")
|
| 164 |
-
print(e.status_code)
|
| 165 |
-
print(e.response)
|
| 166 |
-
|
| 167 |
-
print("Max retries exceeded. Returning empty response.")
|
| 168 |
-
return [], {}
|
| 169 |
-
|
| 170 |
-
# -------------------------------------------------------------------
|
| 171 |
-
def create_validation_prompt(self, input_text):
|
| 172 |
-
|
| 173 |
-
"""
|
| 174 |
-
creating the proper prompt and instructions around the input text
|
| 175 |
-
:param input_text:
|
| 176 |
-
:return:
|
| 177 |
-
"""
|
| 178 |
-
|
| 179 |
-
prompt = f"""
|
| 180 |
-
{self.initial_prompt()}
|
| 181 |
-
|
| 182 |
-
**Input text provided by the Student:**
|
| 183 |
-
{input_text}
|
| 184 |
-
|
| 185 |
-
{self.validator_instructions}
|
| 186 |
-
{self.output_instruction()}
|
| 187 |
-
"""
|
| 188 |
-
|
| 189 |
-
return prompt
|
| 190 |
-
|
| 191 |
-
# -------------------------------------------------------------------
|
| 192 |
-
def validate_dataframe(self, dataframe, target_column, progress_callback=None):
|
| 193 |
-
"""
|
| 194 |
-
generating the prompt for every user based on their text input, generating the results (True or False),
|
| 195 |
-
updating and returning the input dataframe. :return:
|
| 196 |
-
"""
|
| 197 |
-
dataframe["valid"] = None
|
| 198 |
-
start_time = time.time()
|
| 199 |
-
total_users = len(dataframe)
|
| 200 |
-
|
| 201 |
-
for progress, (idx, row) in enumerate(tqdm(dataframe.iterrows(), desc="generating prompts")):
|
| 202 |
-
|
| 203 |
-
if progress_callback is not None:
|
| 204 |
-
progress_callback(progress, total_users)
|
| 205 |
-
input_text = row[target_column]
|
| 206 |
-
prompt = self.create_validation_prompt(input_text)
|
| 207 |
-
response = self.get_llm_response(prompt)
|
| 208 |
-
dataframe.at[idx, "valid"] = response["valid"]
|
| 209 |
-
|
| 210 |
-
current_time = time.time()
|
| 211 |
-
delta = current_time - start_time
|
| 212 |
-
|
| 213 |
-
# Check token limits
|
| 214 |
-
if self.temp_token_counter > 195000 and delta >= 60: # Using a safe margin
|
| 215 |
-
print("Sleeping for 60 seconds to respect the token limit...")
|
| 216 |
-
# reset the token counter
|
| 217 |
-
self.temp_token_counter = 0
|
| 218 |
-
start_time = time.time()
|
| 219 |
-
time.sleep(60) # Sleep for a minute before making new requests
|
| 220 |
-
|
| 221 |
-
return dataframe
|
| 222 |
-
|
| 223 |
-
# -------------------------------------------------------------------
|
| 224 |
-
def validate_text(self, text):
|
| 225 |
-
"""
|
| 226 |
-
generating the prompt for every user based on their text input, generating the results (True or False),
|
| 227 |
-
updating and returning the input dataframe. :return:
|
| 228 |
-
"""
|
| 229 |
-
|
| 230 |
-
prompt = self.create_validation_prompt(text)
|
| 231 |
-
response = self.get_llm_response(prompt)
|
| 232 |
-
return response["valid"]
|
| 233 |
-
|
| 234 |
-
# -------------------------------------------------------------------
|
| 235 |
-
def fire_wall(self):
|
| 236 |
-
"""
|
| 237 |
-
Provide explicit instructions to ensure that sensitive or inappropriate information is identified in the text.
|
| 238 |
-
:return: string
|
| 239 |
-
"""
|
| 240 |
-
fire_wall = """
|
| 241 |
-
As a content moderator, please review the text and ensure it does not contain any of the following:
|
| 242 |
-
|
| 243 |
-
**Disallowed Content Categories:**
|
| 244 |
-
|
| 245 |
-
1. **Sensitive Personal Information**: personal data such as phone numbers, email addresses, or other identifying information.
|
| 246 |
-
|
| 247 |
-
2. **Offensive or Discriminatory Language**: Hate speech, harassment, bullying, or any derogatory remarks targeting individuals or groups based on race, ethnicity, nationality, religion, gender, sexual orientation, age, disability, or any other characteristic.
|
| 248 |
-
|
| 249 |
-
3. **Sensitive Topics**: Content that discusses or promotes extremist views, political propaganda, or divisive religious beliefs in a manner that could incite hostility.
|
| 250 |
-
|
| 251 |
-
4. **Removed or Restricted Content**: Mentions of songs, media, or features that have been removed or are restricted on our platform.
|
| 252 |
-
|
| 253 |
-
5. **Technical Issues or Bugs**: Any references to glitches, errors, crashes, or other technical problems experienced on the platform.
|
| 254 |
-
|
| 255 |
-
6. ** Language that is excessively angry, aggressive, or includes profanity or vulgar expressions. **
|
| 256 |
-
|
| 257 |
-
7. **Privacy Violations**: Sharing of confidential information or content that infringes on someone's privacy rights.
|
| 258 |
-
|
| 259 |
-
8. **Intellectual Property Violations**: Unauthorized use or distribution of copyrighted material.
|
| 260 |
-
|
| 261 |
-
9. **Defamation**: False statements presented as facts that harm the reputation of an individual or organization.
|
| 262 |
-
|
| 263 |
-
**Examples of Invalid Content:**
|
| 264 |
-
|
| 265 |
-
- "This app is useless and the developers are idiots!"
|
| 266 |
-
- "They removed my favorite song; it sucks"
|
| 267 |
-
- "People who follow [specific religion] are all wrong and should be banned."
|
| 268 |
-
|
| 269 |
-
If the text contains any of the above issues, please flag it as invalid.
|
| 270 |
-
|
| 271 |
-
"""
|
| 272 |
-
return fire_wall
|
| 273 |
-
|
| 274 |
-
# -------------------------------------------------------------------
|
| 275 |
-
def default_valid_text(self):
|
| 276 |
-
"""
|
| 277 |
-
Provide explicit instructions to ensure that the text is appropriate and meets the content guidelines.
|
| 278 |
-
:return: string
|
| 279 |
-
"""
|
| 280 |
-
valid_text = """
|
| 281 |
-
|
| 282 |
-
**Allowed Content Criteria:**
|
| 283 |
-
|
| 284 |
-
1. **Positive Sentiment**: The text should be encouraging, uplifting, or convey a positive emotion.
|
| 285 |
-
|
| 286 |
-
2. **Constructive and Helpful**: Provides valuable insights, advice, or shares personal experiences that could
|
| 287 |
-
benefit others. This can be sharing struggling in practices, challenges or other type of difficulties that might need our attention.
|
| 288 |
-
|
| 289 |
-
3. **Respectful Language**: Uses polite and appropriate language, fostering a friendly and inclusive community environment.
|
| 290 |
-
|
| 291 |
-
**Examples of Valid Content:**
|
| 292 |
-
|
| 293 |
-
- "I love how this app helps me discover new music every day!"
|
| 294 |
-
- "Here's a tip: creating themed playlists can really enhance your listening experience."
|
| 295 |
-
- "I had a great time using this feature during my commute today."
|
| 296 |
-
- "This session is so challenging for me and I'm feeling so much pain in my foot, might go over the workout couple more"
|
| 297 |
-
"""
|
| 298 |
-
return valid_text
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Messaging_system/protection_layer.py
DELETED
|
@@ -1,143 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
protection layer on top of the messaging system to make sure the messages are as expected.
|
| 3 |
-
"""
|
| 4 |
-
|
| 5 |
-
from Messaging_system.LLM import LLM
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
# -----------------------------------------------------------------------
|
| 9 |
-
|
| 10 |
-
class ProtectionLayer:
|
| 11 |
-
"""
|
| 12 |
-
Protection layer to double check the generated message:
|
| 13 |
-
"""
|
| 14 |
-
|
| 15 |
-
def __init__(self, CoreConfig):
|
| 16 |
-
|
| 17 |
-
self.Core = CoreConfig
|
| 18 |
-
|
| 19 |
-
self.llm = LLM(CoreConfig)
|
| 20 |
-
# to trace the number of tokens and estimate the cost if needed
|
| 21 |
-
self.total_tokens = {
|
| 22 |
-
'prompt_tokens': 0,
|
| 23 |
-
'completion_tokens': 0,
|
| 24 |
-
}
|
| 25 |
-
|
| 26 |
-
# --------------------------------------------------------------
|
| 27 |
-
# ----------------------------------------------------------------------
|
| 28 |
-
def llm_instructions(self) -> str:
|
| 29 |
-
"""
|
| 30 |
-
System-level directions for the *second-pass* LLM that either approves
|
| 31 |
-
or fixes a push-notification draft produced earlier.
|
| 32 |
-
"""
|
| 33 |
-
|
| 34 |
-
jargon_list = "\n".join(f"- {word}" for word in self.Core.config_file["AI_Jargon"])
|
| 35 |
-
|
| 36 |
-
return f"""
|
| 37 |
-
You are a friendly copy-writer. **Approve the candidate JSON as-is, or
|
| 38 |
-
return a corrected version that obeys every rule below.**
|
| 39 |
-
|
| 40 |
-
ABSOLUTE RULES (override everything else)
|
| 41 |
-
• Output **only** valid JSON with exactly two keys: "header" and "message".
|
| 42 |
-
• Capitalize the **first** word in each value.
|
| 43 |
-
• Keep the original if it already passes every rule.
|
| 44 |
-
|
| 45 |
-
STYLE
|
| 46 |
-
• Sound like everyday speech: casual, friendly, concise.
|
| 47 |
-
• No greetings or sign-offs.
|
| 48 |
-
|
| 49 |
-
JARGON / BANNED CONTENT
|
| 50 |
-
• Never use any of these words (case-insensitive, all forms):
|
| 51 |
-
{jargon_list}
|
| 52 |
-
|
| 53 |
-
• Never use or paraphrase the following phrases (Voice ≠ instrument):
|
| 54 |
-
- Your voice is waiting
|
| 55 |
-
- Your voice awaits
|
| 56 |
-
- Your voice needs you
|
| 57 |
-
- Your voice is calling
|
| 58 |
-
- Your voice deserves more
|
| 59 |
-
- Hit the high notes / Hit those notes
|
| 60 |
-
"""
|
| 61 |
-
|
| 62 |
-
# ----------------------------------------------------------------------
|
| 63 |
-
def get_general_rules(self) -> str:
|
| 64 |
-
"""
|
| 65 |
-
Validation rules applied to both 'header' and 'message'.
|
| 66 |
-
"""
|
| 67 |
-
rules = """
|
| 68 |
-
- No two consecutive sentences may both end with '!'. Change one to '.'.
|
| 69 |
-
- Begin directly with content—no greetings or closings.
|
| 70 |
-
- Fix any grammar or spelling errors.
|
| 71 |
-
- Preserve the exact JSON structure: {"header":"...", "message":"..."}.
|
| 72 |
-
- Remove words that imply recency (e.g. “new”, “latest”, “upcoming”).
|
| 73 |
-
- Capitalize the first word and any proper noun.
|
| 74 |
-
- Would a friendly music instructor casually say such message? If not, rewrite as they would!
|
| 75 |
-
- If no rule is violated, return the JSON unchanged.
|
| 76 |
-
"""
|
| 77 |
-
|
| 78 |
-
return rules
|
| 79 |
-
|
| 80 |
-
# ----------------------------------------------------------------------
|
| 81 |
-
def output_instruction(self) -> str:
|
| 82 |
-
"""
|
| 83 |
-
Explicit output contract (shown last so it’s freshest in token memory).
|
| 84 |
-
"""
|
| 85 |
-
return """
|
| 86 |
-
**Return ONLY JSON, nothing else**
|
| 87 |
-
|
| 88 |
-
{
|
| 89 |
-
"header": "Header text here",
|
| 90 |
-
"message": "Message text here"
|
| 91 |
-
}
|
| 92 |
-
|
| 93 |
-
Constraints
|
| 94 |
-
- "header" ≤ 30 characters (including spaces & punctuation)
|
| 95 |
-
- "message" ≤ 100 characters
|
| 96 |
-
- Do NOT add, remove, or rename keys.
|
| 97 |
-
"""
|
| 98 |
-
|
| 99 |
-
# ----------------------------------------------------------------------
|
| 100 |
-
def get_context(self) -> str:
|
| 101 |
-
"""
|
| 102 |
-
High-level context for the LLM.
|
| 103 |
-
"""
|
| 104 |
-
return (
|
| 105 |
-
"We generated a personalized push-notification. "
|
| 106 |
-
"Please check it against the rules and fix only what is necessary."
|
| 107 |
-
)
|
| 108 |
-
|
| 109 |
-
# ----------------------------------------------------------------------
|
| 110 |
-
def generate_prompt(self, message: str, user: dict) -> str:
|
| 111 |
-
"""
|
| 112 |
-
Combine all pieces into the final prompt sent to the validator LLM.
|
| 113 |
-
"""
|
| 114 |
-
|
| 115 |
-
prompt = f"""
|
| 116 |
-
### Context
|
| 117 |
-
{self.get_context()}
|
| 118 |
-
|
| 119 |
-
### Original JSON
|
| 120 |
-
{message}
|
| 121 |
-
|
| 122 |
-
### Rules
|
| 123 |
-
{self.get_general_rules()}
|
| 124 |
-
|
| 125 |
-
### Output Contract
|
| 126 |
-
{self.output_instruction()}
|
| 127 |
-
"""
|
| 128 |
-
return prompt
|
| 129 |
-
|
| 130 |
-
# --------------------------------------------------------------
|
| 131 |
-
def criticize(self, message, user):
|
| 132 |
-
"""
|
| 133 |
-
criticize the llm response by using additional layer of query
|
| 134 |
-
:return: updated users_df with extracted information and personalize messages.
|
| 135 |
-
"""
|
| 136 |
-
|
| 137 |
-
prompt = self.generate_prompt(message, user)
|
| 138 |
-
response = self.llm.get_response(prompt=prompt, instructions=self.llm_instructions())
|
| 139 |
-
|
| 140 |
-
return response, self.total_tokens
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Messaging_system/sending_time.py
DELETED
|
@@ -1,69 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
calculating sending time for each individual user
|
| 3 |
-
"""
|
| 4 |
-
|
| 5 |
-
import numpy as np
|
| 6 |
-
from snowflake.snowpark import Session
|
| 7 |
-
import json
|
| 8 |
-
import pandas as pd
|
| 9 |
-
import os
|
| 10 |
-
from dotenv import load_dotenv
|
| 11 |
-
load_dotenv()
|
| 12 |
-
|
| 13 |
-
class PersonalizedTime:
|
| 14 |
-
"""
|
| 15 |
-
This module will calcualte the best tiume to send for each individual users
|
| 16 |
-
"""
|
| 17 |
-
|
| 18 |
-
def calculate_sending_time(self):
|
| 19 |
-
|
| 20 |
-
# fetching data
|
| 21 |
-
session = self.snowflake_connection()
|
| 22 |
-
query = self.fetch_users_time(session)
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
def fetch_users_time(self, session):
|
| 26 |
-
"""
|
| 27 |
-
fetching user's activity data
|
| 28 |
-
:param dataframe:
|
| 29 |
-
:return:
|
| 30 |
-
"""
|
| 31 |
-
|
| 32 |
-
query = self.get_query()
|
| 33 |
-
|
| 34 |
-
# Connect to Snowflake
|
| 35 |
-
try:
|
| 36 |
-
spark_df = session.sql(query).collect()
|
| 37 |
-
dataframe = pd.DataFrame(spark_df)
|
| 38 |
-
print(f"reading content table successfully")
|
| 39 |
-
return dataframe
|
| 40 |
-
except Exception as e:
|
| 41 |
-
print(f"Error in reading table: {e}")
|
| 42 |
-
|
| 43 |
-
def get_query(self):
|
| 44 |
-
|
| 45 |
-
query = """
|
| 46 |
-
|
| 47 |
-
"""
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
def snowflake_connection(self):
|
| 53 |
-
"""
|
| 54 |
-
setting snowflake connection
|
| 55 |
-
:return:
|
| 56 |
-
"""
|
| 57 |
-
|
| 58 |
-
conn = {
|
| 59 |
-
"user": os.getenv('snowflake_user'),
|
| 60 |
-
"password": os.getenv('snowflake_password'),
|
| 61 |
-
"account": os.getenv('snowflake_account'),
|
| 62 |
-
"role": os.getenv('snowflake_role'),
|
| 63 |
-
"database": os.getenv('snowflake_database'),
|
| 64 |
-
"warehouse": os.getenv('snowflake_warehouse'),
|
| 65 |
-
"schema": os.getenv('snowflake_schema'),
|
| 66 |
-
}
|
| 67 |
-
|
| 68 |
-
session = Session.builder.configs(conn).create()
|
| 69 |
-
return session
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
README.md
DELETED
|
@@ -1,16 +0,0 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: AI Message Generator
|
| 3 |
-
emoji: 🚀
|
| 4 |
-
colorFrom: red
|
| 5 |
-
colorTo: red
|
| 6 |
-
sdk: docker
|
| 7 |
-
app_port: 8501
|
| 8 |
-
tags:
|
| 9 |
-
- streamlit
|
| 10 |
-
pinned: false
|
| 11 |
-
short_description: 'UI for AI Messaging system '
|
| 12 |
-
license: apache-2.0
|
| 13 |
-
---
|
| 14 |
-
|
| 15 |
-
AI messaging system UI
|
| 16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Singeo_camp.csv
DELETED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ai_messaging_system_v2/Data/test_camp.json
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"campaign_view":"singeo_re_engagement",
|
| 3 |
+
"campaign_name": "musora-staff-test-campaign",
|
| 4 |
+
"brand": "singeo",
|
| 5 |
+
"1": {
|
| 6 |
+
"identifier_column": "email",
|
| 7 |
+
"stage": 1,
|
| 8 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 9 |
+
"recsys_contents": [
|
| 10 |
+
"workout",
|
| 11 |
+
"course",
|
| 12 |
+
"quick_tips"
|
| 13 |
+
],
|
| 14 |
+
"involve_recsys_result": true,
|
| 15 |
+
"personalization": true,
|
| 16 |
+
"sample_examples": "Header: Your next lesson is waiting 👇 \n Message: Check it out now and improve your singing!",
|
| 17 |
+
"model": "gemini-2.5-flash-lite"
|
| 18 |
+
},
|
| 19 |
+
"2": {
|
| 20 |
+
"identifier_column": "email",
|
| 21 |
+
"stage": 2,
|
| 22 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 23 |
+
"recsys_contents": [
|
| 24 |
+
"workout",
|
| 25 |
+
"course",
|
| 26 |
+
"quick_tips"
|
| 27 |
+
],
|
| 28 |
+
"involve_recsys_result": true,
|
| 29 |
+
"personalization": true,
|
| 30 |
+
"sample_examples": "Header: It’s a great day to sing 🤩,\n Message: It’s been a few days — warm up with a quick lesson!",
|
| 31 |
+
"model": "gemini-2.5-flash-lite"
|
| 32 |
+
},
|
| 33 |
+
"3": {
|
| 34 |
+
"identifier_column": "email",
|
| 35 |
+
"stage": 3,
|
| 36 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 37 |
+
"recsys_contents": [
|
| 38 |
+
"workout",
|
| 39 |
+
"course",
|
| 40 |
+
"quick_tips"
|
| 41 |
+
],
|
| 42 |
+
"involve_recsys_result": true,
|
| 43 |
+
"personalization": true,
|
| 44 |
+
"sample_examples": "Header: Practice makes progress 💪, \nMessage: You don’t need to be perfect. But you do need practice to reach your goals!",
|
| 45 |
+
"model": "gemini-2.5-flash-lite"
|
| 46 |
+
},
|
| 47 |
+
"4": {
|
| 48 |
+
"identifier_column": "email",
|
| 49 |
+
"stage": 4,
|
| 50 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 51 |
+
"recsys_contents": [
|
| 52 |
+
"workout",
|
| 53 |
+
"course",
|
| 54 |
+
"quick_tips"
|
| 55 |
+
],
|
| 56 |
+
"involve_recsys_result": true,
|
| 57 |
+
"personalization": true,
|
| 58 |
+
"sample_examples": "Header: Never stop learning, \nMessage: Take a lesson today and get back on track!",
|
| 59 |
+
"model": "gemini-2.5-flash-lite"
|
| 60 |
+
},
|
| 61 |
+
"5": {
|
| 62 |
+
"identifier_column": "email",
|
| 63 |
+
"stage": 5,
|
| 64 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 65 |
+
"recsys_contents": [
|
| 66 |
+
"workout",
|
| 67 |
+
"course",
|
| 68 |
+
"quick_tips"
|
| 69 |
+
],
|
| 70 |
+
"involve_recsys_result": true,
|
| 71 |
+
"personalization": true,
|
| 72 |
+
"sample_examples": "Header: Get back on track ⏱️\nMessage: It’s been two weeks since your last practice session. Take a lesson today!",
|
| 73 |
+
"model": "gemini-2.5-flash-lite"
|
| 74 |
+
},
|
| 75 |
+
"6": {
|
| 76 |
+
"identifier_column": "email",
|
| 77 |
+
"stage": 6,
|
| 78 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 79 |
+
"recsys_contents": [
|
| 80 |
+
"workout",
|
| 81 |
+
"course",
|
| 82 |
+
"quick_tips"
|
| 83 |
+
],
|
| 84 |
+
"involve_recsys_result": true,
|
| 85 |
+
"personalization": true,
|
| 86 |
+
"sample_examples": "Header: Keep on going!\nMessage: Get back to singing today. It only takes a few minutes!",
|
| 87 |
+
"model": "gemini-2.5-flash-lite"
|
| 88 |
+
},
|
| 89 |
+
"7": {
|
| 90 |
+
"identifier_column": "email",
|
| 91 |
+
"stage": 7,
|
| 92 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 93 |
+
"recsys_contents": [
|
| 94 |
+
"workout",
|
| 95 |
+
"course",
|
| 96 |
+
"quick_tips"
|
| 97 |
+
],
|
| 98 |
+
"involve_recsys_result": true,
|
| 99 |
+
"personalization": true,
|
| 100 |
+
"sample_examples": "Header: Ready to sing? 🎤\nMessage: Let’s get started. Time for a quick practice session!",
|
| 101 |
+
"model": "gemini-2.5-flash-lite"
|
| 102 |
+
},
|
| 103 |
+
"8": {
|
| 104 |
+
"identifier_column": "email",
|
| 105 |
+
"stage": 8,
|
| 106 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 107 |
+
"recsys_contents": [
|
| 108 |
+
"workout",
|
| 109 |
+
"course",
|
| 110 |
+
"quick_tips"
|
| 111 |
+
],
|
| 112 |
+
"involve_recsys_result": true,
|
| 113 |
+
"personalization": true,
|
| 114 |
+
"sample_examples": "Header: Your lesson’s waiting. 📥\nMessage: We want to hear you sing! Dive in today.",
|
| 115 |
+
"model": "gemini-2.5-flash-lite"
|
| 116 |
+
},
|
| 117 |
+
"9": {
|
| 118 |
+
"identifier_column": "email",
|
| 119 |
+
"stage": 9,
|
| 120 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 121 |
+
"recsys_contents": [
|
| 122 |
+
"workout",
|
| 123 |
+
"course",
|
| 124 |
+
"quick_tips"
|
| 125 |
+
],
|
| 126 |
+
"involve_recsys_result": true,
|
| 127 |
+
"personalization": true,
|
| 128 |
+
"sample_examples": "Header: Time for a comeback!\nMessage: We haven’t seen you in 25 days. This will help get you back into the groove!",
|
| 129 |
+
"model": "gemini-2.5-flash-lite"
|
| 130 |
+
},
|
| 131 |
+
"10": {
|
| 132 |
+
"identifier_column": "email",
|
| 133 |
+
"stage": 10,
|
| 134 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 135 |
+
"recsys_contents": [
|
| 136 |
+
"workout",
|
| 137 |
+
"course",
|
| 138 |
+
"quick_tips"
|
| 139 |
+
],
|
| 140 |
+
"involve_recsys_result": true,
|
| 141 |
+
"personalization": true,
|
| 142 |
+
"sample_examples": "Header: Have you been practicing?\nMessage: You have a lovely voice. We’d love to hear it again!",
|
| 143 |
+
"model": "gemini-2.5-flash-lite"
|
| 144 |
+
},
|
| 145 |
+
"11": {
|
| 146 |
+
"identifier_column": "email",
|
| 147 |
+
"stage": 11,
|
| 148 |
+
"segment_info": "Students who haven't practiced and logged into the app after at least 3 days.",
|
| 149 |
+
"recsys_contents": [
|
| 150 |
+
"workout",
|
| 151 |
+
"course",
|
| 152 |
+
"quick_tips"
|
| 153 |
+
],
|
| 154 |
+
"involve_recsys_result": true,
|
| 155 |
+
"personalization": true,
|
| 156 |
+
"sample_examples": "Header: We Miss You 😔Message: All your lessons will just be here when you get back!",
|
| 157 |
+
"model": "gemini-2.5-flash-lite"
|
| 158 |
+
}
|
| 159 |
+
}
|
ai_messaging_system_v2/Data/test_staff.csv
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
email
|
| 2 | |
| 3 | |
| 4 | |
| 5 | |
| 6 | |
| 7 | |
| 8 | |
| 9 | |
| 10 | |
| 11 |
ai_messaging_system_v2/Data/ui_output/.gitkeep
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UI output directory
|
| 2 |
+
# This directory stores CSV output files when running in UI mode
|
| 3 |
+
# Files are cleared on each new UI run
|
ai_messaging_system_v2/Data/ui_output/message_cost.csv
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
brand,campaign_name,number_of_messages,model,stage,total_prompt_tokens,total_completion_tokens,total_cost,timestamp
|
| 2 |
+
drumeo,UI-Test-Campaign-Re-engagement,5,gpt-5-nano,1,4322,267,0.0003229,2026-01-12 01:39:36.723734+00:00
|
| 3 |
+
drumeo,UI-Test-Campaign-Re-engagement,5,gpt-5-nano,1,3862,238,0.0002883,2026-01-12 01:39:38.216117+00:00
|
| 4 |
+
drumeo,UI-Test-Campaign-Re-engagement,5,gemini-2.5-flash-lite,2,4467,208,0.0005298999999999999,2026-01-12 01:39:43.754321+00:00
|
| 5 |
+
drumeo,UI-Test-Campaign-Re-engagement,5,gpt-5-nano,2,4194,227,0.0003005,2026-01-12 01:39:45.335461+00:00
|
| 6 |
+
drumeo,UI-Test-Campaign-Re-engagement,5,gemini-2.5-flash-lite,3,4845,211,0.0005689000000000001,2026-01-12 01:39:49.489488+00:00
|
| 7 |
+
drumeo,UI-Test-Campaign-Re-engagement,5,gpt-5-nano,3,4379,230,0.00031095000000000005,2026-01-12 01:39:52.901870+00:00
|
ai_messaging_system_v2/Data/ui_output/messages_a_drumeo_20260111_2039.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ai_messaging_system_v2/Data/ui_output/messages_b_drumeo_20260111_2039.csv
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
user_id,email,first_name,birthday,birthday_reminder,user_info,instrument,platform,permission,expiration_date,recsys_result,message,brand,recommendation,recommendation_info,campaign_name,timestamp,stage
|
| 2 |
+
876151,[email protected],,,,"Music Styles: Country, Funk, Hip-Hop/Rap, Jazz, Metal, Pop, Rock, Soul
|
| 3 |
+
Music Topics: Composition, Creativity, Electronic Drums, Feet, Fills, Grooves, Hands, Independence, Performance, Rudiments, Theory
|
| 4 |
+
Goals: Explore techniques, genres, and styles, Improve drumming technique, Learn as many songs as possible, Learn drumming theory, Stick to a consistent practice routine
|
| 5 |
+
",Drum,push,plus,2026-01-28 17:27:30-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""1"": {""header"": ""Time for your drill session 👇"", ""message"": ""Explore tailored grooves and rudiments to fit your style in Recommendations."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:28.068450+00:00,1
|
| 6 |
+
164216,[email protected],Carl,,,"Music Styles: Hip-Hop/Rap, Rock
|
| 7 |
+
Music Topics: Electronic Drums, Feet, Fills, Grooves, Hands
|
| 8 |
+
Goals: Improve drumming technique, Learn as many songs as possible, Stick to a consistent practice routine
|
| 9 |
+
",Drum,push,plus,2026-01-20 12:04:08-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""1"": {""header"": ""Carl, time to practice"", ""message"": ""Open your Recommendations: a structured drill set to sharpen hands, grooves, and feet."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:28.068450+00:00,1
|
| 10 |
+
223559,[email protected],Rene,1949-08-24,,"Music Styles: Blues, Country, Rock
|
| 11 |
+
Music Topics: Feet, Fills, Grooves, Hands
|
| 12 |
+
Goals: Improve drumming technique
|
| 13 |
+
",Drum,push,plus,2026-03-06 12:03:54-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""1"": {""header"": ""Rene, your drums crave grooves"", ""message"": ""Check your personalized recommendations and build hands, feet, and groove skills together."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:28.068450+00:00,1
|
| 14 |
+
881978,[email protected],,,,"Music Styles: Funk, Jazz, Rock
|
| 15 |
+
Music Topics: Feet, Grooves
|
| 16 |
+
Goals: Explore techniques, genres, and styles, Learn drumming theory
|
| 17 |
+
",Drum,push,plus,2026-09-07 13:49:53-07:00,https://www.musora.com/drumeo/lessons/recommended,"{""1"": {""header"": ""Your drum groove awaits 👇"", ""message"": ""Jump back in with tailored picks: funk, jazz, and rock grooves plus feet technique. Your path continues here."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:28.068450+00:00,1
|
| 18 |
+
560107,[email protected],,,,"Music Styles: CCM/Worship, Country, Jazz, Pop, Rock
|
| 19 |
+
Music Topics: Creativity, Feet, Fills, Grooves, Hands, Performance
|
| 20 |
+
Goals: Stick to a consistent practice routine
|
| 21 |
+
",Drum,push,plus,2026-02-25 00:26:29-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""1"": {""header"": ""Back to the groove, drummer"", ""message"": ""Jump into your personalized recommendations and groove with hands, feet, and rhythm today."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:28.068450+00:00,1
|
| 22 |
+
876151,[email protected],,,,"Music Styles: Country, Funk, Hip-Hop/Rap, Jazz, Metal, Pop, Rock, Soul
|
| 23 |
+
Music Topics: Composition, Creativity, Electronic Drums, Feet, Fills, Grooves, Hands, Independence, Performance, Rudiments, Theory
|
| 24 |
+
Goals: Explore techniques, genres, and styles, Improve drumming technique, Learn as many songs as possible, Learn drumming theory, Stick to a consistent practice routine
|
| 25 |
+
",Drum,push,plus,2026-01-28 17:27:30-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""2"": {""header"": ""Drum path for you 🎯"", ""message"": ""Jump into tailored grooves and rudiments in Recommendations—your next step awaits."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:39.017914+00:00,2
|
| 26 |
+
164216,[email protected],Carl,,,"Music Styles: Hip-Hop/Rap, Rock
|
| 27 |
+
Music Topics: Electronic Drums, Feet, Fills, Grooves, Hands
|
| 28 |
+
Goals: Improve drumming technique, Learn as many songs as possible, Stick to a consistent practice routine
|
| 29 |
+
",Drum,push,plus,2026-01-20 12:04:08-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""2"": {""header"": ""Carl, keep the groove flowing"", ""message"": ""Check your Recommendations for a focused drill set on feet, hands, and grooves."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:39.017914+00:00,2
|
| 30 |
+
223559,[email protected],Rene,1949-08-24,,"Music Styles: Blues, Country, Rock
|
| 31 |
+
Music Topics: Feet, Fills, Grooves, Hands
|
| 32 |
+
Goals: Improve drumming technique
|
| 33 |
+
",Drum,push,plus,2026-03-06 12:03:54-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""2"": {""header"": ""Rene, tap for grooves ahead"", ""message"": ""Your personalized recommendations await—explore drills on feet, hands, and grooves to progress your technique."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:39.017914+00:00,2
|
| 34 |
+
881978,[email protected],,,,"Music Styles: Funk, Jazz, Rock
|
| 35 |
+
Music Topics: Feet, Grooves
|
| 36 |
+
Goals: Explore techniques, genres, and styles, Learn drumming theory
|
| 37 |
+
",Drum,push,plus,2026-09-07 13:49:53-07:00,https://www.musora.com/drumeo/lessons/recommended,"{""2"": {""header"": ""Back to the groove 🎶"", ""message"": ""Your personalized picks await—explore funk, jazz, and rock focus with feet and groove work, from your path."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:39.017914+00:00,2
|
| 38 |
+
560107,[email protected],,,,"Music Styles: CCM/Worship, Country, Jazz, Pop, Rock
|
| 39 |
+
Music Topics: Creativity, Feet, Fills, Grooves, Hands, Performance
|
| 40 |
+
Goals: Stick to a consistent practice routine
|
| 41 |
+
",Drum,push,plus,2026-02-25 00:26:29-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""2"": {""header"": ""Hands and feet together"", ""message"": ""Open your personalized recommendations and shape a steady practice rhythm today."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:39.017914+00:00,2
|
| 42 |
+
876151,[email protected],,,,"Music Styles: Country, Funk, Hip-Hop/Rap, Jazz, Metal, Pop, Rock, Soul
|
| 43 |
+
Music Topics: Composition, Creativity, Electronic Drums, Feet, Fills, Grooves, Hands, Independence, Performance, Rudiments, Theory
|
| 44 |
+
Goals: Explore techniques, genres, and styles, Improve drumming technique, Learn as many songs as possible, Learn drumming theory, Stick to a consistent practice routine
|
| 45 |
+
",Drum,push,plus,2026-01-28 17:27:30-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""3"": {""header"": ""Your drum path awaits 🥁"", ""message"": ""Dive into tailored grooves and rudiments in Recommendations—crafted for your style and goals."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:47.443090+00:00,3
|
| 46 |
+
164216,[email protected],Carl,,,"Music Styles: Hip-Hop/Rap, Rock
|
| 47 |
+
Music Topics: Electronic Drums, Feet, Fills, Grooves, Hands
|
| 48 |
+
Goals: Improve drumming technique, Learn as many songs as possible, Stick to a consistent practice routine
|
| 49 |
+
",Drum,push,plus,2026-01-20 12:04:08-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""3"": {""header"": ""Carl, keep the groove moving"", ""message"": ""Open your Picks: a hand/feet focused drill set to refine your rhythm and fills."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:47.443090+00:00,3
|
| 50 |
+
223559,[email protected],Rene,1949-08-24,,"Music Styles: Blues, Country, Rock
|
| 51 |
+
Music Topics: Feet, Fills, Grooves, Hands
|
| 52 |
+
Goals: Improve drumming technique
|
| 53 |
+
",Drum,push,plus,2026-03-06 12:03:54-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""3"": {""header"": ""Rene, your groove awaits"", ""message"": ""Open your Recommendations to explore hands, feet, and groove drills tailored to blues, country, and rock."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:47.443090+00:00,3
|
| 54 |
+
881978,[email protected],,,,"Music Styles: Funk, Jazz, Rock
|
| 55 |
+
Music Topics: Feet, Grooves
|
| 56 |
+
Goals: Explore techniques, genres, and styles, Learn drumming theory
|
| 57 |
+
",Drum,push,plus,2026-09-07 13:49:53-07:00,https://www.musora.com/drumeo/lessons/recommended,"{""3"": {""header"": ""Your groove guide awaits"", ""message"": ""Dive into tailored recommendations: funk, jazz, rock rhythms and feet work to expand your groove map."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:47.443090+00:00,3
|
| 58 |
+
560107,[email protected],,,,"Music Styles: CCM/Worship, Country, Jazz, Pop, Rock
|
| 59 |
+
Music Topics: Creativity, Feet, Fills, Grooves, Hands, Performance
|
| 60 |
+
Goals: Stick to a consistent practice routine
|
| 61 |
+
",Drum,push,plus,2026-02-25 00:26:29-08:00,https://www.musora.com/drumeo/lessons/recommended,"{""3"": {""header"": ""Your drum groove awaits"", ""message"": ""Dive into personalized guidance and shape steady rhythm with hands, feet, and creativity today."", ""content_id"": null, ""web_url_path"": ""https://www.musora.com/drumeo/lessons/recommended"", ""title"": null, ""thumbnail_url"": null, ""deeplink"": null}}",drumeo,for_you,Redirecting user to their personalized Recommendations,UI-Test-Campaign-Re-engagement,2026-01-12 01:39:47.443090+00:00,3
|
{Messaging_system → ai_messaging_system_v2/Messaging_system}/CoreConfig.py
RENAMED
|
@@ -3,7 +3,13 @@ the flow of the Program starts from create_personalized_message function
|
|
| 3 |
"""
|
| 4 |
import os
|
| 5 |
import time
|
| 6 |
-
from
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
|
| 9 |
class CoreConfig:
|
|
@@ -22,11 +28,11 @@ class CoreConfig:
|
|
| 22 |
# LLM configs
|
| 23 |
self.api_key = None # will be set by user
|
| 24 |
self.model = "gpt-4o" # default -> will be set by user
|
| 25 |
-
self.temperature = 0.
|
| 26 |
self.reasoning_model=False
|
| 27 |
|
| 28 |
# will be set by user
|
| 29 |
-
self.personalization=
|
| 30 |
self.CTA = None
|
| 31 |
self.message_style = None
|
| 32 |
self.sample_example = None
|
|
@@ -40,6 +46,10 @@ class CoreConfig:
|
|
| 40 |
self.consider_last_interaction = True
|
| 41 |
self.additional_instructions = None
|
| 42 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
# to trace the number of tokens and estimate the cost if needed
|
| 44 |
self.temp_token_counter = 0
|
| 45 |
self.total_tokens = {
|
|
@@ -51,7 +61,8 @@ class CoreConfig:
|
|
| 51 |
self.recsys_result = None
|
| 52 |
self.recsys_contents = ["song", "workout", "course", "quick_tips"]
|
| 53 |
self.content_info = None
|
| 54 |
-
self.involve_recsys_result =
|
|
|
|
| 55 |
self.popular_contents_df = None
|
| 56 |
|
| 57 |
# Additional_info
|
|
@@ -64,14 +75,16 @@ class CoreConfig:
|
|
| 64 |
self.wait_time = None
|
| 65 |
|
| 66 |
# Instantiate the connection to Snowflake
|
| 67 |
-
self.SF =
|
| 68 |
|
| 69 |
-
#
|
| 70 |
-
self.
|
| 71 |
|
| 72 |
# brand's voice language
|
| 73 |
self.brand_voice = self.get_brand_voice()
|
| 74 |
|
|
|
|
|
|
|
| 75 |
# ===============================================================
|
| 76 |
def get_brand_voice(self):
|
| 77 |
"""
|
|
@@ -79,9 +92,11 @@ class CoreConfig:
|
|
| 79 |
Returns:
|
| 80 |
str: The brand voice phrases if file exists, otherwise None.
|
| 81 |
"""
|
| 82 |
-
|
|
|
|
|
|
|
| 83 |
|
| 84 |
-
if not
|
| 85 |
return None
|
| 86 |
|
| 87 |
with open(file_path, 'r', encoding='utf-8') as f:
|
|
@@ -135,16 +150,6 @@ class CoreConfig:
|
|
| 135 |
else:
|
| 136 |
print(f"{messaging_mode} is not a valid messaging mode. available modes are: \n {valid_modes}")
|
| 137 |
|
| 138 |
-
# --------------------------------------------------------------
|
| 139 |
-
# --------------------------------------------------------------
|
| 140 |
-
def set_openai_api(self, openai_key):
|
| 141 |
-
"""
|
| 142 |
-
Setting openai key
|
| 143 |
-
:param openai_key: a string with placeholders
|
| 144 |
-
:return:
|
| 145 |
-
"""
|
| 146 |
-
self.api_key = openai_key
|
| 147 |
-
|
| 148 |
# --------------------------------------------------------------
|
| 149 |
# --------------------------------------------------------------
|
| 150 |
def set_number_of_samples(self, number_of_samples):
|
|
@@ -175,28 +180,6 @@ class CoreConfig:
|
|
| 175 |
"""
|
| 176 |
self.segment_info = segment_info
|
| 177 |
|
| 178 |
-
|
| 179 |
-
# --------------------------------------------------------------
|
| 180 |
-
# --------------------------------------------------------------
|
| 181 |
-
def set_number_of_messages(self, number_of_messages=1, instructionset=None, subsequent_examples=None):
|
| 182 |
-
"""
|
| 183 |
-
If the number of messages is more than 1, we will set self.subsequence_messages to a dictionary where
|
| 184 |
-
the key is an integer from 1 to number_of_messages, and the values are corresponding instructions in instructionset.
|
| 185 |
-
:param number_of_messages: int
|
| 186 |
-
:param instructionset: list of instructions
|
| 187 |
-
:return:
|
| 188 |
-
"""
|
| 189 |
-
|
| 190 |
-
if number_of_messages == 1:
|
| 191 |
-
self.subsequence_messages = {1: None}
|
| 192 |
-
else:
|
| 193 |
-
if instructionset is not None:
|
| 194 |
-
self.subsequence_messages = instructionset
|
| 195 |
-
if subsequent_examples is not None:
|
| 196 |
-
self.subsequent_examples = subsequent_examples
|
| 197 |
-
else:
|
| 198 |
-
raise ValueError("Instructionset must have instructions for each subsequence message")
|
| 199 |
-
|
| 200 |
# --------------------------------------------------------------
|
| 201 |
# --------------------------------------------------------------
|
| 202 |
|
|
@@ -243,7 +226,7 @@ class CoreConfig:
|
|
| 243 |
delta = current_time - self.start_time
|
| 244 |
|
| 245 |
# Check token limits
|
| 246 |
-
if self.temp_token_counter >
|
| 247 |
print("Sleeping for few seconds to respect the token limit...")
|
| 248 |
# reset the token counter
|
| 249 |
self.temp_token_counter = 0
|
|
@@ -272,13 +255,13 @@ class CoreConfig:
|
|
| 272 |
|
| 273 |
# --------------------------------------------------------------
|
| 274 |
# --------------------------------------------------------------
|
| 275 |
-
def set_segment_name(self,
|
| 276 |
"""
|
| 277 |
saving the current process
|
| 278 |
:return:
|
| 279 |
"""
|
| 280 |
|
| 281 |
-
self.
|
| 282 |
# ==============================================================
|
| 283 |
|
| 284 |
def set_personalization(self):
|
|
|
|
| 3 |
"""
|
| 4 |
import os
|
| 5 |
import time
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
try:
|
| 8 |
+
from ..database import DatabaseManager
|
| 9 |
+
except ImportError:
|
| 10 |
+
import sys
|
| 11 |
+
sys.path.append(str(Path(__file__).parent.parent))
|
| 12 |
+
from database import DatabaseManager
|
| 13 |
|
| 14 |
|
| 15 |
class CoreConfig:
|
|
|
|
| 28 |
# LLM configs
|
| 29 |
self.api_key = None # will be set by user
|
| 30 |
self.model = "gpt-4o" # default -> will be set by user
|
| 31 |
+
self.temperature = 0.7
|
| 32 |
self.reasoning_model=False
|
| 33 |
|
| 34 |
# will be set by user
|
| 35 |
+
self.personalization=True
|
| 36 |
self.CTA = None
|
| 37 |
self.message_style = None
|
| 38 |
self.sample_example = None
|
|
|
|
| 46 |
self.consider_last_interaction = True
|
| 47 |
self.additional_instructions = None
|
| 48 |
|
| 49 |
+
# Campaign and per-message instructions (new feature)
|
| 50 |
+
self.campaign_instructions = None
|
| 51 |
+
self.per_message_instructions = None
|
| 52 |
+
|
| 53 |
# to trace the number of tokens and estimate the cost if needed
|
| 54 |
self.temp_token_counter = 0
|
| 55 |
self.total_tokens = {
|
|
|
|
| 61 |
self.recsys_result = None
|
| 62 |
self.recsys_contents = ["song", "workout", "course", "quick_tips"]
|
| 63 |
self.content_info = None
|
| 64 |
+
self.involve_recsys_result = True
|
| 65 |
+
self.specific_content_id = None # Force specific content for all users (overrides AI recommendation)
|
| 66 |
self.popular_contents_df = None
|
| 67 |
|
| 68 |
# Additional_info
|
|
|
|
| 75 |
self.wait_time = None
|
| 76 |
|
| 77 |
# Instantiate the connection to Snowflake
|
| 78 |
+
self.SF = DatabaseManager(session=self.session, brand=self.brand)
|
| 79 |
|
| 80 |
+
# campaign_name
|
| 81 |
+
self.campaign_name = None
|
| 82 |
|
| 83 |
# brand's voice language
|
| 84 |
self.brand_voice = self.get_brand_voice()
|
| 85 |
|
| 86 |
+
self.openai_fallback_enabled=False
|
| 87 |
+
|
| 88 |
# ===============================================================
|
| 89 |
def get_brand_voice(self):
|
| 90 |
"""
|
|
|
|
| 92 |
Returns:
|
| 93 |
str: The brand voice phrases if file exists, otherwise None.
|
| 94 |
"""
|
| 95 |
+
# Get the directory relative to the script location
|
| 96 |
+
script_dir = Path(__file__).parent.parent.resolve() # Go up one level from Messaging_system
|
| 97 |
+
file_path = script_dir / 'Config_files' / f'{self.brand.lower()}_phrases.txt'
|
| 98 |
|
| 99 |
+
if not file_path.exists():
|
| 100 |
return None
|
| 101 |
|
| 102 |
with open(file_path, 'r', encoding='utf-8') as f:
|
|
|
|
| 150 |
else:
|
| 151 |
print(f"{messaging_mode} is not a valid messaging mode. available modes are: \n {valid_modes}")
|
| 152 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
# --------------------------------------------------------------
|
| 154 |
# --------------------------------------------------------------
|
| 155 |
def set_number_of_samples(self, number_of_samples):
|
|
|
|
| 180 |
"""
|
| 181 |
self.segment_info = segment_info
|
| 182 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 183 |
# --------------------------------------------------------------
|
| 184 |
# --------------------------------------------------------------
|
| 185 |
|
|
|
|
| 226 |
delta = current_time - self.start_time
|
| 227 |
|
| 228 |
# Check token limits
|
| 229 |
+
if self.temp_token_counter > 3995000 and delta <= 60: # Using a safe margin
|
| 230 |
print("Sleeping for few seconds to respect the token limit...")
|
| 231 |
# reset the token counter
|
| 232 |
self.temp_token_counter = 0
|
|
|
|
| 255 |
|
| 256 |
# --------------------------------------------------------------
|
| 257 |
# --------------------------------------------------------------
|
| 258 |
+
def set_segment_name(self, campaign_name):
|
| 259 |
"""
|
| 260 |
saving the current process
|
| 261 |
:return:
|
| 262 |
"""
|
| 263 |
|
| 264 |
+
self.campaign_name = campaign_name
|
| 265 |
# ==============================================================
|
| 266 |
|
| 267 |
def set_personalization(self):
|
{Messaging_system → ai_messaging_system_v2/Messaging_system}/DataCollector.py
RENAMED
|
@@ -1,12 +1,18 @@
|
|
| 1 |
"""
|
| 2 |
setting instructions and inputs required to generate personalized messages
|
| 3 |
"""
|
|
|
|
| 4 |
import numpy as np
|
| 5 |
import pandas as pd
|
| 6 |
-
|
|
|
|
| 7 |
|
| 8 |
class DataCollector:
|
| 9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
def __init__(self, CoreConfig):
|
| 11 |
|
| 12 |
self.Core = CoreConfig
|
|
@@ -22,8 +28,12 @@ class DataCollector:
|
|
| 22 |
# extract user_ids and other data
|
| 23 |
self.extract_musora_id()
|
| 24 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
# selecting a sample of users
|
| 26 |
-
self.select_sample()
|
| 27 |
|
| 28 |
self.fetch_data()
|
| 29 |
|
|
@@ -50,15 +60,12 @@ class DataCollector:
|
|
| 50 |
raise Exception("Input data must contain user_id, musora_user_id, id, or email column.")
|
| 51 |
|
| 52 |
# Normalize the identification column to 'user_id'
|
| 53 |
-
if id_col in ['musora_user_id', 'id']:
|
| 54 |
self.Core.users_df.rename(columns={id_col: 'user_id'}, inplace=True)
|
|
|
|
| 55 |
elif id_col == 'email':
|
| 56 |
self._lookup_user_ids_from_email()
|
| 57 |
|
| 58 |
-
# Identify additional columns: exclude identification columns
|
| 59 |
-
identification_columns = {'user_id', 'email'} if 'email' in self.Core.users_df.columns else {'user_id'}
|
| 60 |
-
additional_columns = [col for col in self.Core.users_df.columns if col not in identification_columns]
|
| 61 |
-
self.Core.additional_info_columns = [col.lower() for col in additional_columns]
|
| 62 |
|
| 63 |
# -----------------------------------------------------------------
|
| 64 |
# -----------------------------------------------------------------
|
|
@@ -69,29 +76,61 @@ class DataCollector:
|
|
| 69 |
"""
|
| 70 |
unique_emails = self.Core.users_df["email"].unique()
|
| 71 |
data = self.Core.SF.extract_id_from_email(emails=unique_emails)
|
| 72 |
-
self.Core.users_df = pd.merge(self.Core.users_df, data, on='email', how='left')
|
| 73 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 74 |
# -----------------------------------------------------------------
|
| 75 |
# -----------------------------------------------------------------
|
| 76 |
def remaining_days_to_birthday(self):
|
| 77 |
"""
|
| 78 |
-
|
| 79 |
-
|
|
|
|
| 80 |
"""
|
|
|
|
|
|
|
| 81 |
|
| 82 |
-
# Iterate through each row in the DataFrame
|
| 83 |
for idx, row in self.Core.users_df.iterrows():
|
| 84 |
if pd.notna(row.get("birthday")):
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
self.Core.users_df.at[idx, "birthday_reminder"] = None
|
| 90 |
|
| 91 |
# -----------------------------------------------------------------
|
| 92 |
# -----------------------------------------------------------------
|
| 93 |
def fetch_data(self):
|
| 94 |
-
|
|
|
|
|
|
|
|
|
|
| 95 |
# Fetch datasets
|
| 96 |
user_ids = self.Core.users_df["user_id"].unique()
|
| 97 |
users_data = self.Core.SF.get_data("users", user_ids)
|
|
@@ -115,7 +154,7 @@ class DataCollector:
|
|
| 115 |
self.Core.users_df[col] = self.Core.users_df[col].replace(['', 'None', 'nan'], np.nan)
|
| 116 |
|
| 117 |
# Now drop rows where 'permission' is missing
|
| 118 |
-
self.Core.users_df.dropna(subset=["permission"], inplace=True)
|
| 119 |
self.Core.users_df = self.Core.users_df.drop_duplicates(subset=['user_id'])
|
| 120 |
|
| 121 |
self.Core.content_info = contents_data
|
|
@@ -137,29 +176,12 @@ class DataCollector:
|
|
| 137 |
self.Core.users_df["prompt"] = None # will contain final prompt
|
| 138 |
self.Core.users_df["instrument"] = self.Core.get_instrument()
|
| 139 |
self.Core.users_df["platform"] = self.Core.platform
|
| 140 |
-
self.Core.users_df["
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
# providing additional input and instructions based on available columns in the input file
|
| 147 |
-
# :return: instructions
|
| 148 |
-
# """
|
| 149 |
-
# self.Core.users_df["additional_info"] = None
|
| 150 |
-
#
|
| 151 |
-
# # Iterate through each row in the DataFrame
|
| 152 |
-
# for idx, row in self.Core.users_df.iterrows():
|
| 153 |
-
# additional_info = []
|
| 154 |
-
#
|
| 155 |
-
# # populating additional_info
|
| 156 |
-
# for feature in self.Core.additional_info_columns:
|
| 157 |
-
# value = row.get(feature)
|
| 158 |
-
# if pd.notna(value) and value not in [None, [], {}] and (
|
| 159 |
-
# not isinstance(value, str) or value.strip()):
|
| 160 |
-
# additional_info.append(f"{feature}: {str(value)}")
|
| 161 |
-
#
|
| 162 |
-
# self.Core.users_df.at[idx, "additional_info"] = "\n".join(additional_info)
|
| 163 |
|
| 164 |
# -----------------------------------------------------------------
|
| 165 |
# -----------------------------------------------------------------
|
|
@@ -171,9 +193,208 @@ class DataCollector:
|
|
| 171 |
"""
|
| 172 |
|
| 173 |
# Use self.number_of_samples if sample_size is None, otherwise default to 20
|
| 174 |
-
if sample_size is None:
|
| 175 |
-
|
|
|
|
|
|
|
| 176 |
|
| 177 |
total_users = self.Core.users_df.shape[0]
|
| 178 |
sample_size = min(total_users, sample_size)
|
| 179 |
self.Core.users_df = self.Core.users_df.sample(n=sample_size, replace=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
"""
|
| 2 |
setting instructions and inputs required to generate personalized messages
|
| 3 |
"""
|
| 4 |
+
import json
|
| 5 |
import numpy as np
|
| 6 |
import pandas as pd
|
| 7 |
+
from datetime import datetime, timezone
|
| 8 |
+
from pathlib import Path
|
| 9 |
|
| 10 |
class DataCollector:
|
| 11 |
|
| 12 |
+
# UI mode constants (same as Permes class)
|
| 13 |
+
UI_OUTPUT_DIR = Path(__file__).parent.parent / "Data" / "ui_output"
|
| 14 |
+
UI_OUTPUT_FILE = "messages.csv"
|
| 15 |
+
|
| 16 |
def __init__(self, CoreConfig):
|
| 17 |
|
| 18 |
self.Core = CoreConfig
|
|
|
|
| 28 |
# extract user_ids and other data
|
| 29 |
self.extract_musora_id()
|
| 30 |
|
| 31 |
+
if len(self.Core.users_df)==0:
|
| 32 |
+
# No valid user exist --> users don't have valid permission
|
| 33 |
+
return self.Core
|
| 34 |
+
|
| 35 |
# selecting a sample of users
|
| 36 |
+
# self.select_sample()
|
| 37 |
|
| 38 |
self.fetch_data()
|
| 39 |
|
|
|
|
| 60 |
raise Exception("Input data must contain user_id, musora_user_id, id, or email column.")
|
| 61 |
|
| 62 |
# Normalize the identification column to 'user_id'
|
| 63 |
+
if id_col in ['musora_user_id', 'id', 'user_id']:
|
| 64 |
self.Core.users_df.rename(columns={id_col: 'user_id'}, inplace=True)
|
| 65 |
+
self._lookup_permissions()
|
| 66 |
elif id_col == 'email':
|
| 67 |
self._lookup_user_ids_from_email()
|
| 68 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
|
| 70 |
# -----------------------------------------------------------------
|
| 71 |
# -----------------------------------------------------------------
|
|
|
|
| 76 |
"""
|
| 77 |
unique_emails = self.Core.users_df["email"].unique()
|
| 78 |
data = self.Core.SF.extract_id_from_email(emails=unique_emails)
|
| 79 |
+
# self.Core.users_df = pd.merge(self.Core.users_df, data, on='email', how='left')
|
| 80 |
+
self.Core.users_df = data
|
| 81 |
+
# =================================================================
|
| 82 |
+
def _lookup_permissions(self):
|
| 83 |
+
"""
|
| 84 |
+
Looks up emails and permissions based on unique user_ids and merges the results
|
| 85 |
+
into self.users_df. Assumes self.users_df contains an 'user_id' column.
|
| 86 |
+
"""
|
| 87 |
+
unique_ids = self.Core.users_df["user_id"].unique()
|
| 88 |
+
data = self.Core.SF.extract_email_from_id(unique_ids=unique_ids)
|
| 89 |
+
# self.Core.users_df = pd.merge(self.Core.users_df, data, on='email', how='left')
|
| 90 |
+
self.Core.users_df = data
|
| 91 |
# -----------------------------------------------------------------
|
| 92 |
# -----------------------------------------------------------------
|
| 93 |
def remaining_days_to_birthday(self):
|
| 94 |
"""
|
| 95 |
+
Calculate the remaining days to each user's birthday.
|
| 96 |
+
Only store the number if it's less than or equal to 6 days,
|
| 97 |
+
otherwise store None.
|
| 98 |
"""
|
| 99 |
+
today = datetime.now().date()
|
| 100 |
+
self.Core.users_df["birthday_reminder"] = None
|
| 101 |
|
|
|
|
| 102 |
for idx, row in self.Core.users_df.iterrows():
|
| 103 |
if pd.notna(row.get("birthday")):
|
| 104 |
+
try:
|
| 105 |
+
# Parse the birthday value (Snowflake datetime format)
|
| 106 |
+
bday = pd.to_datetime(row["birthday"]).date()
|
| 107 |
+
|
| 108 |
+
# Replace year with current year
|
| 109 |
+
next_bday = bday.replace(year=today.year)
|
| 110 |
+
|
| 111 |
+
# If birthday already passed this year, use next year
|
| 112 |
+
if next_bday < today:
|
| 113 |
+
next_bday = next_bday.replace(year=today.year + 1)
|
| 114 |
+
|
| 115 |
+
# Days until birthday
|
| 116 |
+
remaining_days = (next_bday - today).days
|
| 117 |
+
|
| 118 |
+
# Save only if within 6 days
|
| 119 |
+
if remaining_days <= 6:
|
| 120 |
+
self.Core.users_df.at[idx, "birthday_reminder"] = remaining_days
|
| 121 |
+
else:
|
| 122 |
+
self.Core.users_df.at[idx, "birthday_reminder"] = None
|
| 123 |
+
except Exception as e:
|
| 124 |
+
# Handle invalid date formats gracefully
|
| 125 |
self.Core.users_df.at[idx, "birthday_reminder"] = None
|
| 126 |
|
| 127 |
# -----------------------------------------------------------------
|
| 128 |
# -----------------------------------------------------------------
|
| 129 |
def fetch_data(self):
|
| 130 |
+
"""
|
| 131 |
+
Fetch all required data for the process: Users, contents, interaction_data, recsys data, popular contents
|
| 132 |
+
:return:
|
| 133 |
+
"""
|
| 134 |
# Fetch datasets
|
| 135 |
user_ids = self.Core.users_df["user_id"].unique()
|
| 136 |
users_data = self.Core.SF.get_data("users", user_ids)
|
|
|
|
| 154 |
self.Core.users_df[col] = self.Core.users_df[col].replace(['', 'None', 'nan'], np.nan)
|
| 155 |
|
| 156 |
# Now drop rows where 'permission' is missing
|
| 157 |
+
# self.Core.users_df.dropna(subset=["permission"], inplace=True)
|
| 158 |
self.Core.users_df = self.Core.users_df.drop_duplicates(subset=['user_id'])
|
| 159 |
|
| 160 |
self.Core.content_info = contents_data
|
|
|
|
| 176 |
self.Core.users_df["prompt"] = None # will contain final prompt
|
| 177 |
self.Core.users_df["instrument"] = self.Core.get_instrument()
|
| 178 |
self.Core.users_df["platform"] = self.Core.platform
|
| 179 |
+
self.Core.users_df["campaign_name"] = self.Core.campaign_name
|
| 180 |
+
# creating timestamp
|
| 181 |
+
now_utc = datetime.now(timezone.utc)
|
| 182 |
+
self.Core.users_df["timestamp"] = now_utc
|
| 183 |
+
self.Core.users_df["brand"] = str(self.Core.brand).lower()
|
| 184 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 185 |
|
| 186 |
# -----------------------------------------------------------------
|
| 187 |
# -----------------------------------------------------------------
|
|
|
|
| 193 |
"""
|
| 194 |
|
| 195 |
# Use self.number_of_samples if sample_size is None, otherwise default to 20
|
| 196 |
+
if sample_size is None or sample_size==0:
|
| 197 |
+
# set it to be 20 by default
|
| 198 |
+
# sample_size = self.Core.number_of_samples if self.Core.number_of_samples is not None else 20
|
| 199 |
+
sample_size = 500
|
| 200 |
|
| 201 |
total_users = self.Core.users_df.shape[0]
|
| 202 |
sample_size = min(total_users, sample_size)
|
| 203 |
self.Core.users_df = self.Core.users_df.sample(n=sample_size, replace=False)
|
| 204 |
+
|
| 205 |
+
def fetch_log_data(self, stage, test_mode, mode="production", ui_experiment_id=None):
    """
    Load the users we already generated messages for, together with their
    message history, and enrich self.Core with content/popularity data.

    Args:
        stage: current stage number.
        test_mode: if True, the Snowflake lookup ignores the cooldown period.
        mode: operating mode - "production", "test", or "ui"
            (UI mode reads history from a local CSV instead of Snowflake).
        ui_experiment_id: optional experiment ID for UI mode
            (e.g. 'messages_a_drumeo_20260111_1756').

    Returns:
        The updated CoreConfig, or None when no eligible users are found.
    """
    self.extract_musora_id()

    user_ids = self.Core.users_df["user_id"].unique()
    if len(user_ids) == 0:
        print("No users found")
        return None

    # Pick the history source according to the operating mode.
    if mode == "ui":
        history = self._read_log_data_from_csv(
            user_ids, stage, self.Core.campaign_name, ui_experiment_id
        )
    else:
        # Production/Test mode: read from Snowflake.
        history = self.Core.SF.get_log_data(
            user_ids, stage, self.Core.campaign_name, test_mode
        )

    if len(history) == 0:
        print("No users found")
        return None

    history = self._prepare_log_data(history)

    contents = self.Core.SF.get_data("contents")
    popular = self.Core.SF.get_data("popular_contents")

    self.Core.users_df["user_id"] = self.Core.users_df["user_id"].astype(int)

    # Attach the per-user message history onto the base frame
    # (which already carries a "user_id" column).
    self.Core.users_df = self.Core.users_df.merge(
        history, on="user_id", how="left", suffixes=("", "_users")
    )

    # Normalise empty-string style placeholders to proper NaN.
    for col in self.Core.users_df.columns:
        self.Core.users_df[col] = self.Core.users_df[col].replace(
            ['', 'None', 'nan'], np.nan
        )

    self.remaining_days_to_birthday()
    self.Core.content_info = contents
    self.Core.popular_contents_df = popular

    self.Core.users_df["timestamp"] = datetime.now(timezone.utc)
    self.Core.users_df["brand"] = str(self.Core.brand).lower()

    return self.Core
|
| 261 |
+
|
| 262 |
+
def _prepare_log_data(self, users_df):
    """
    Collapse the raw message log (one row per user per stage) into a single
    row per user: the most recent stage's record, plus a "previous_messages"
    text column summarising up to the last three messages.
    """
    log = users_df.copy()

    # Snowflake can hand back numpy integer types; force plain int.
    log["stage"] = log["stage"].astype(int)
    log[["_header", "_body"]] = log.apply(self._extract_header_body, axis=1)

    prev_text = (
        log.groupby("user_id", as_index=False)
        .apply(lambda g: pd.Series({"previous_messages": self._build_previous_messages(g)}))
    )

    # Keep only each user's most recent record (highest stage == stage-1).
    latest = (
        log.sort_values(["user_id", "stage"], ascending=[True, False])
        .groupby("user_id", as_index=False)
        .head(1)
        .drop(columns=["_header", "_body"])
    )

    # Final frame: one row per user + previous_messages column.
    return latest.merge(prev_text, on="user_id", how="left")
|
| 286 |
+
|
| 287 |
+
# --- extract header/body from MESSAGE JSON for each row ---
|
| 288 |
+
def _extract_header_body(self, row):
|
| 289 |
+
raw = row.get("message")
|
| 290 |
+
try:
|
| 291 |
+
payload = json.loads(raw) if isinstance(raw, str) else (raw or {})
|
| 292 |
+
except Exception:
|
| 293 |
+
payload = {}
|
| 294 |
+
|
| 295 |
+
# prefer the key equal to this row's stage, else fall back to first dict value
|
| 296 |
+
stage_key = str(row["stage"])
|
| 297 |
+
node = payload.get(stage_key)
|
| 298 |
+
if not isinstance(node, dict) and isinstance(payload, dict) and payload:
|
| 299 |
+
node = next((v for v in payload.values() if isinstance(v, dict)), {})
|
| 300 |
+
|
| 301 |
+
header = (node.get("header") if isinstance(node, dict) else "") or ""
|
| 302 |
+
body = (node.get("message") if isinstance(node, dict) else "") or ""
|
| 303 |
+
return pd.Series({"_header": header.strip(), "_body": body.strip()})
|
| 304 |
+
|
| 305 |
+
def _build_previous_messages(self, g: pd.DataFrame) -> str:
|
| 306 |
+
# --- build previous_messages (merge last up-to-3 messages into one string) ---
|
| 307 |
+
g = g.sort_values("stage") # chronological (oldest -> newest)
|
| 308 |
+
msgs = [{"header": h, "message": b} for h, b in zip(g["_header"], g["_body"])]
|
| 309 |
+
recent = msgs[-3:] # up to 3 most recent
|
| 310 |
+
parts = []
|
| 311 |
+
for i, m in enumerate(recent, start=1):
|
| 312 |
+
header = (m.get("header") or "").strip()
|
| 313 |
+
body = (m.get("message") or "").strip()
|
| 314 |
+
parts.append(f"Message {i}: (header) {header}\n (message) {body}")
|
| 315 |
+
return "\n\n".join(parts)
|
| 316 |
+
|
| 317 |
+
# ======================= UI MODE HELPER FUNCTIONS =======================
|
| 318 |
+
|
| 319 |
+
def _read_log_data_from_csv(self, user_ids, stage, campaign_name, ui_experiment_id=None):
|
| 320 |
+
"""
|
| 321 |
+
Read historical message data from local CSV file in UI mode.
|
| 322 |
+
|
| 323 |
+
This function reads from the single CSV file that contains all previous stages,
|
| 324 |
+
similar to reading from Snowflake in production mode.
|
| 325 |
+
|
| 326 |
+
IMPORTANT: Deduplicates by (user_id, stage) to match Snowflake behavior,
|
| 327 |
+
keeping only the most recent record for each (user_id, stage) pair.
|
| 328 |
+
|
| 329 |
+
Args:
|
| 330 |
+
user_ids: Array of user IDs to filter
|
| 331 |
+
stage: Current stage number
|
| 332 |
+
campaign_name: Campaign name
|
| 333 |
+
ui_experiment_id: Optional experiment ID for UI mode (e.g., 'messages_a_drumeo_20260111_1756')
|
| 334 |
+
|
| 335 |
+
Returns:
|
| 336 |
+
pd.DataFrame: Historical message data for previous stages (deduplicated)
|
| 337 |
+
"""
|
| 338 |
+
# Use experiment ID if provided (for AB testing), otherwise use default filename
|
| 339 |
+
if ui_experiment_id:
|
| 340 |
+
messages_file = self.UI_OUTPUT_DIR / f"{ui_experiment_id}.csv"
|
| 341 |
+
else:
|
| 342 |
+
messages_file = self.UI_OUTPUT_DIR / self.UI_OUTPUT_FILE
|
| 343 |
+
|
| 344 |
+
# Check if file exists
|
| 345 |
+
if not messages_file.exists():
|
| 346 |
+
print(f"⚠️ UI Mode: No previous message data found at {messages_file}")
|
| 347 |
+
print(f" This is expected for stage 1 or first run.")
|
| 348 |
+
return pd.DataFrame()
|
| 349 |
+
|
| 350 |
+
try:
|
| 351 |
+
# Read CSV with UTF-8-SIG encoding to support emojis and handle BOM
|
| 352 |
+
all_messages = pd.read_csv(messages_file, encoding='utf-8-sig')
|
| 353 |
+
|
| 354 |
+
# Normalize column names to lowercase
|
| 355 |
+
all_messages.columns = all_messages.columns.str.lower()
|
| 356 |
+
|
| 357 |
+
# Filter for:
|
| 358 |
+
# 1. Matching user IDs
|
| 359 |
+
# 2. Matching campaign name
|
| 360 |
+
# 3. Previous stages only (stage < current stage)
|
| 361 |
+
filtered_messages = all_messages[
|
| 362 |
+
(all_messages['user_id'].isin(user_ids)) &
|
| 363 |
+
(all_messages['campaign_name'] == campaign_name) &
|
| 364 |
+
(all_messages['stage'] < stage)
|
| 365 |
+
]
|
| 366 |
+
|
| 367 |
+
if len(filtered_messages) == 0:
|
| 368 |
+
print(f"ℹ️ UI Mode: No previous messages found for {len(user_ids)} users")
|
| 369 |
+
return pd.DataFrame()
|
| 370 |
+
|
| 371 |
+
# CRITICAL: Deduplicate by (user_id, stage) to match Snowflake behavior
|
| 372 |
+
# Keep only the most recent record per (user_id, stage) based on timestamp
|
| 373 |
+
# This prevents duplicate rows from being processed in follow-up stages
|
| 374 |
+
if 'timestamp' in filtered_messages.columns:
|
| 375 |
+
# Convert timestamp to datetime for proper sorting
|
| 376 |
+
filtered_messages['timestamp'] = pd.to_datetime(filtered_messages['timestamp'])
|
| 377 |
+
|
| 378 |
+
# Sort by timestamp descending and keep first (most recent) per (user_id, stage)
|
| 379 |
+
filtered_messages = (
|
| 380 |
+
filtered_messages
|
| 381 |
+
.sort_values('timestamp', ascending=False)
|
| 382 |
+
.groupby(['user_id', 'stage'], as_index=False)
|
| 383 |
+
.first()
|
| 384 |
+
)
|
| 385 |
+
|
| 386 |
+
total_before_dedup = len(all_messages[(all_messages['user_id'].isin(user_ids)) &
|
| 387 |
+
(all_messages['campaign_name'] == campaign_name) &
|
| 388 |
+
(all_messages['stage'] < stage)])
|
| 389 |
+
deduped_count = len(filtered_messages)
|
| 390 |
+
|
| 391 |
+
print(f"✅ UI Mode: Loaded {deduped_count} previous messages from {messages_file}")
|
| 392 |
+
if total_before_dedup > deduped_count:
|
| 393 |
+
print(f" (Deduplicated {total_before_dedup - deduped_count} duplicate records)")
|
| 394 |
+
|
| 395 |
+
return filtered_messages
|
| 396 |
+
|
| 397 |
+
except Exception as e:
|
| 398 |
+
print(f"❌ Error reading UI mode data from {messages_file}: {str(e)}")
|
| 399 |
+
return pd.DataFrame()
|
| 400 |
+
|
{Messaging_system → ai_messaging_system_v2/Messaging_system}/Homepage_Recommender.py
RENAMED
|
File without changes
|
{Messaging_system → ai_messaging_system_v2/Messaging_system}/LLM.py
RENAMED
|
@@ -3,16 +3,14 @@ This class contains multiple LLMs and handles LLMs response
|
|
| 3 |
"""
|
| 4 |
|
| 5 |
import json
|
| 6 |
-
import time
|
| 7 |
from openai import OpenAI
|
| 8 |
import openai
|
| 9 |
-
import torch
|
| 10 |
import re
|
| 11 |
-
import anthropic
|
| 12 |
import os
|
| 13 |
-
import streamlit as st
|
| 14 |
from google.genai import types
|
| 15 |
from google import genai
|
|
|
|
|
|
|
| 16 |
|
| 17 |
|
| 18 |
|
|
@@ -20,28 +18,43 @@ class LLM:
|
|
| 20 |
def __init__(self, Core):
|
| 21 |
self.Core = Core
|
| 22 |
self.model = None
|
| 23 |
-
self.model_type = "openai"
|
| 24 |
self.client = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
self.connect_to_llm()
|
| 26 |
|
| 27 |
def get_credential(self, key):
|
| 28 |
-
return os.getenv(key)
|
| 29 |
-
|
| 30 |
-
def get_response(self, prompt, instructions):
|
| 31 |
-
if self.model_type == "openai":
|
| 32 |
-
response = self.get_message_openai(prompt, instructions)
|
| 33 |
-
# elif self.model_type == "ollama":
|
| 34 |
-
# response = self.get_message_ollama(prompt, instructions)
|
| 35 |
-
elif self.model_type == "inference":
|
| 36 |
-
response = self.get_message_inference(prompt, instructions)
|
| 37 |
-
elif self.model_type == "claude":
|
| 38 |
-
response = self.get_message_claude(prompt, instructions)
|
| 39 |
-
elif self.model_type == "google":
|
| 40 |
-
response = self.get_message_google(prompt, instructions)
|
| 41 |
-
else:
|
| 42 |
-
raise f"Invalid model type : {self.model_type}"
|
| 43 |
|
| 44 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
|
| 46 |
def connect_to_llm(self):
|
| 47 |
"""
|
|
@@ -52,118 +65,61 @@ class LLM:
|
|
| 52 |
if self.Core.model in self.Core.config_file["openai_models"]:
|
| 53 |
self.model_type = "openai"
|
| 54 |
|
| 55 |
-
elif self.Core.model in self.Core.config_file["inference_models"]:
|
| 56 |
-
self.model_type = "inference"
|
| 57 |
-
|
| 58 |
elif self.Core.model in self.Core.config_file["google_models"]:
|
| 59 |
self.model_type = "google"
|
| 60 |
|
| 61 |
-
# elif self.Core.model in self.Core.config_file["ollama_models"]:
|
| 62 |
-
# self.model_type = "ollama"
|
| 63 |
-
# self.client = ollama.Client()
|
| 64 |
-
|
| 65 |
-
elif self.Core.model in self.Core.config_file["claude_models"]:
|
| 66 |
-
self.model_type = "claude"
|
| 67 |
-
self.client = anthropic.Anthropic(
|
| 68 |
-
api_key=self.get_credential('claude_api_key'),
|
| 69 |
-
)
|
| 70 |
-
|
| 71 |
self.model = self.Core.model
|
| 72 |
|
| 73 |
-
|
| 74 |
-
def get_message_inference(self, prompt, instructions, max_retries=4):
|
| 75 |
-
"""
|
| 76 |
-
sending the prompt to openai LLM and get back the response
|
| 77 |
"""
|
|
|
|
| 78 |
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
)
|
| 84 |
-
|
| 85 |
-
for attempt in range(max_retries):
|
| 86 |
-
try:
|
| 87 |
-
if self.Core.reasoning_model:
|
| 88 |
-
response = client.chat.completions.create(
|
| 89 |
-
model=self.Core.model,
|
| 90 |
-
response_format={"type": "json_object"},
|
| 91 |
-
messages=[
|
| 92 |
-
{"role": "system", "content": instructions},
|
| 93 |
-
{"role": "user", "content": prompt}
|
| 94 |
-
],
|
| 95 |
-
reasoning_effort="low",
|
| 96 |
-
n=1,
|
| 97 |
-
)
|
| 98 |
-
|
| 99 |
-
else:
|
| 100 |
-
response = client.chat.completions.create(
|
| 101 |
-
model=self.Core.model,
|
| 102 |
-
response_format={"type": "json_object"},
|
| 103 |
-
messages=[
|
| 104 |
-
{"role": "system", "content": instructions},
|
| 105 |
-
{"role": "user", "content": prompt}
|
| 106 |
-
],
|
| 107 |
-
n=1,
|
| 108 |
-
temperature=self.Core.temperature
|
| 109 |
-
)
|
| 110 |
-
|
| 111 |
-
tokens = {
|
| 112 |
-
'prompt_tokens': response.usage.prompt_tokens,
|
| 113 |
-
'completion_tokens': response.usage.completion_tokens,
|
| 114 |
-
'total_tokens': response.usage.total_tokens
|
| 115 |
-
}
|
| 116 |
-
|
| 117 |
-
# validating the JSON
|
| 118 |
-
self.Core.total_tokens['prompt_tokens'] += tokens['prompt_tokens']
|
| 119 |
-
self.Core.total_tokens['completion_tokens'] += tokens['completion_tokens']
|
| 120 |
-
self.Core.temp_token_counter += tokens['total_tokens']
|
| 121 |
-
|
| 122 |
-
try:
|
| 123 |
-
content = response.choices[0].message.content
|
| 124 |
|
| 125 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 126 |
|
| 127 |
-
|
|
|
|
|
|
|
|
|
|
| 128 |
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
continue # Continue to next attempt
|
| 132 |
|
| 133 |
-
|
| 134 |
-
if len(output["header"].strip()) > self.Core.config_file["header_limit"] or len(
|
| 135 |
-
output["message"].strip()) > self.Core.config_file["message_limit"]:
|
| 136 |
-
print(
|
| 137 |
-
f"'header' or 'message' is more than specified characters in response on attempt {attempt + 1}. Retrying...")
|
| 138 |
-
continue
|
| 139 |
|
| 140 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 141 |
|
| 142 |
-
|
| 143 |
-
|
|
|
|
| 144 |
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
print(e.__cause__) # an underlying Exception, likely raised within httpx.
|
| 148 |
-
except openai.RateLimitError as e:
|
| 149 |
-
print("A 429 status code was received; we should back off a bit.")
|
| 150 |
-
except openai.APIStatusError as e:
|
| 151 |
-
print("Another non-200-range status code was received")
|
| 152 |
-
print(e.status_code)
|
| 153 |
-
print(e.response)
|
| 154 |
-
|
| 155 |
-
print("Max retries exceeded. Returning empty response.")
|
| 156 |
-
return None
|
| 157 |
|
| 158 |
# =========================================================================
|
| 159 |
-
def get_message_google(self, prompt, instructions, max_retries=
|
| 160 |
|
| 161 |
-
client = genai.Client(api_key=self.get_credential("
|
|
|
|
| 162 |
|
| 163 |
for attempt in range(max_retries):
|
| 164 |
try:
|
| 165 |
response = client.models.generate_content(
|
| 166 |
-
model=
|
| 167 |
contents=prompt,
|
| 168 |
config=types.GenerateContentConfig(
|
| 169 |
thinking_config=types.ThinkingConfig(thinking_budget=0),
|
|
@@ -172,34 +128,26 @@ class LLM:
|
|
| 172 |
response_mime_type="application/json"
|
| 173 |
))
|
| 174 |
|
|
|
|
| 175 |
tokens = {
|
| 176 |
'prompt_tokens': response.usage_metadata.prompt_token_count,
|
| 177 |
'completion_tokens': response.usage_metadata.candidates_token_count,
|
| 178 |
'total_tokens': response.usage_metadata.total_token_count
|
| 179 |
}
|
| 180 |
|
| 181 |
-
#
|
| 182 |
self.Core.total_tokens['prompt_tokens'] += tokens['prompt_tokens']
|
| 183 |
self.Core.total_tokens['completion_tokens'] += tokens['completion_tokens']
|
| 184 |
-
self.Core.temp_token_counter += tokens['
|
| 185 |
|
| 186 |
output = self.preprocess_and_parse_json(response.text)
|
| 187 |
-
# output = json.loads(str(response.text))
|
| 188 |
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
|
|
|
|
|
|
| 192 |
|
| 193 |
-
else:
|
| 194 |
-
if len(output["header"].strip()) > self.Core.config_file["header_limit"] or len(
|
| 195 |
-
output["message"].strip()) > self.Core.config_file["message_limit"]:
|
| 196 |
-
print(
|
| 197 |
-
f"'header' or 'message' is more than specified characters in response on attempt {attempt + 1}. Retrying...")
|
| 198 |
-
headchar= len(output["header"].strip())
|
| 199 |
-
messagechar = len(output["message"].strip())
|
| 200 |
-
print(
|
| 201 |
-
f"'header' has {headchar} chars and 'message' has {messagechar} ...")
|
| 202 |
-
continue
|
| 203 |
return output
|
| 204 |
|
| 205 |
except json.JSONDecodeError:
|
|
@@ -212,13 +160,13 @@ class LLM:
|
|
| 212 |
|
| 213 |
# =========================================================================
|
| 214 |
|
| 215 |
-
def get_message_openai(self, prompt, instructions, max_retries=
|
| 216 |
"""
|
| 217 |
sending the prompt to openai LLM and get back the response
|
| 218 |
"""
|
| 219 |
|
| 220 |
-
openai.api_key = self.
|
| 221 |
-
client = OpenAI(api_key=self.
|
| 222 |
|
| 223 |
for attempt in range(max_retries):
|
| 224 |
try:
|
|
@@ -252,29 +200,24 @@ class LLM:
|
|
| 252 |
'total_tokens': response.usage.total_tokens
|
| 253 |
}
|
| 254 |
|
| 255 |
-
|
| 256 |
-
self.Core.total_tokens['prompt_tokens'] += tokens['prompt_tokens']
|
| 257 |
-
self.Core.total_tokens['completion_tokens'] += tokens['completion_tokens']
|
| 258 |
-
self.Core.temp_token_counter += tokens['total_tokens']
|
| 259 |
|
| 260 |
try:
|
| 261 |
content = response.choices[0].message.content
|
| 262 |
|
| 263 |
# Extract JSON code block
|
| 264 |
-
|
| 265 |
output = json.loads(content)
|
| 266 |
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
if len(output["header"].strip()) > self.Core.config_file["header_limit"] or len(
|
| 273 |
-
output["message"].strip()) > self.Core.config_file["message_limit"]:
|
| 274 |
-
print(
|
| 275 |
-
f"'header' or 'message' is more than specified characters in response on attempt {attempt + 1}. Retrying...")
|
| 276 |
-
continue
|
| 277 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 278 |
return output
|
| 279 |
|
| 280 |
except json.JSONDecodeError:
|
|
@@ -293,130 +236,14 @@ class LLM:
|
|
| 293 |
print("Max retries exceeded. Returning empty response.")
|
| 294 |
return None
|
| 295 |
|
| 296 |
-
#
|
| 297 |
-
|
| 298 |
-
def get_message_ollama(self, prompt, instructions, max_retries=10):
|
| 299 |
-
"""
|
| 300 |
-
Send the prompt to the LLM and get back the response.
|
| 301 |
-
Includes handling for GPU memory issues by clearing cache and waiting before retry.
|
| 302 |
-
"""
|
| 303 |
-
prompt = instructions + "\n \n" + prompt
|
| 304 |
-
for attempt in range(max_retries):
|
| 305 |
-
try:
|
| 306 |
-
# Try generating the response
|
| 307 |
-
response = self.client.generate(model=self.model, prompt=prompt)
|
| 308 |
-
except Exception as e:
|
| 309 |
-
# This catches errors like the connection being forcibly closed
|
| 310 |
-
print(f"Error on attempt {attempt + 1}: {e}.")
|
| 311 |
-
try:
|
| 312 |
-
# Clear GPU cache if you're using PyTorch; this may help free up memory
|
| 313 |
-
torch.cuda.empty_cache()
|
| 314 |
-
print("Cleared GPU cache.")
|
| 315 |
-
except Exception as cache_err:
|
| 316 |
-
print("Failed to clear GPU cache:", cache_err)
|
| 317 |
-
# Wait a bit before retrying to allow memory to recover
|
| 318 |
-
time.sleep(2)
|
| 319 |
-
continue
|
| 320 |
-
|
| 321 |
-
try:
|
| 322 |
-
tokens = {
|
| 323 |
-
'prompt_tokens': 0,
|
| 324 |
-
'completion_tokens': 0,
|
| 325 |
-
'total_tokens': 0
|
| 326 |
-
}
|
| 327 |
-
|
| 328 |
-
try:
|
| 329 |
-
output = self.preprocess_and_parse_json(response.response)
|
| 330 |
-
if output is None:
|
| 331 |
-
continue
|
| 332 |
-
|
| 333 |
-
if 'message' not in output or 'header' not in output:
|
| 334 |
-
print(f"'message' or 'header' is missing in response on attempt {attempt + 1}. Retrying...")
|
| 335 |
-
continue # Continue to next attempt
|
| 336 |
-
|
| 337 |
-
else:
|
| 338 |
-
if len(output["header"].strip()) > self.Core.config_file["header_limit"] or len(
|
| 339 |
-
output["message"].strip()) > self.Core.config_file["message_limit"]:
|
| 340 |
-
print(
|
| 341 |
-
f"'header' or 'message' is more than specified characters in response on attempt {attempt + 1}. Retrying...")
|
| 342 |
-
continue
|
| 343 |
-
else:
|
| 344 |
-
return output
|
| 345 |
-
|
| 346 |
-
except json.JSONDecodeError:
|
| 347 |
-
print(f"Invalid JSON from LLM on attempt {attempt + 1}. Retrying...")
|
| 348 |
-
except Exception as parse_error:
|
| 349 |
-
print("Error processing output:", parse_error)
|
| 350 |
-
|
| 351 |
-
print("Max retries exceeded. Returning empty response.")
|
| 352 |
-
return None
|
| 353 |
-
|
| 354 |
-
def get_message_claude(self, prompt, instructions, max_retries=4):
|
| 355 |
-
"""
|
| 356 |
-
send prompt to claude LLM and get back the response
|
| 357 |
-
:param prompt:
|
| 358 |
-
:param instructions:
|
| 359 |
-
:return:
|
| 360 |
-
"""
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
for attempt in range(max_retries):
|
| 364 |
-
try:
|
| 365 |
-
|
| 366 |
-
message = self.client.messages.create(
|
| 367 |
-
model=self.model,
|
| 368 |
-
max_tokens=4096,
|
| 369 |
-
system = instructions,
|
| 370 |
-
messages=[
|
| 371 |
-
{"role": "user", "content": prompt + "\nHere is the JSON requested:\n"}
|
| 372 |
-
],
|
| 373 |
-
temperature=self.Core.temperature
|
| 374 |
-
)
|
| 375 |
-
# Try generating the response
|
| 376 |
-
response = message.content[0].text
|
| 377 |
-
|
| 378 |
-
tokens = {
|
| 379 |
-
'prompt_tokens': message.usage.input_tokens,
|
| 380 |
-
'completion_tokens': message.usage.output_tokens,
|
| 381 |
-
'total_tokens': message.usage.output_tokens + message.usage.input_tokens
|
| 382 |
-
}
|
| 383 |
-
|
| 384 |
-
self.Core.total_tokens['prompt_tokens'] += tokens['prompt_tokens']
|
| 385 |
-
self.Core.total_tokens['completion_tokens'] += tokens['completion_tokens']
|
| 386 |
-
self.Core.temp_token_counter += tokens['total_tokens']
|
| 387 |
-
|
| 388 |
-
try:
|
| 389 |
-
output = self.preprocess_and_parse_json_claude(response)
|
| 390 |
-
if output is None:
|
| 391 |
-
continue
|
| 392 |
-
|
| 393 |
-
if 'message' not in output or 'header' not in output:
|
| 394 |
-
print(f"'message' or 'header' is missing in response on attempt {attempt + 1}. Retrying...")
|
| 395 |
-
continue # Continue to next attempt
|
| 396 |
-
|
| 397 |
-
else:
|
| 398 |
-
if len(output["header"].strip()) > self.Core.config_file["header_limit"] or len(
|
| 399 |
-
output["message"].strip()) > self.Core.config_file["message_limit"]:
|
| 400 |
-
print(
|
| 401 |
-
f"'header' or 'message' is more than specified characters in response on attempt {attempt + 1}. Retrying...")
|
| 402 |
-
continue
|
| 403 |
-
else:
|
| 404 |
-
return output
|
| 405 |
-
|
| 406 |
-
except json.JSONDecodeError:
|
| 407 |
-
print(f"Invalid JSON from LLM on attempt {attempt + 1}. Retrying...")
|
| 408 |
-
except Exception as parse_error:
|
| 409 |
-
print("Error processing output:", parse_error)
|
| 410 |
-
|
| 411 |
-
print("Max retries exceeded. Returning empty response.")
|
| 412 |
-
return None
|
| 413 |
-
|
| 414 |
-
# ======================================================================
|
| 415 |
|
| 416 |
def preprocess_and_parse_json(self, response: str):
|
| 417 |
"""
|
| 418 |
Remove <think> blocks, extract JSON (from ```json fences or first {...} block),
|
| 419 |
-
|
|
|
|
|
|
|
| 420 |
"""
|
| 421 |
|
| 422 |
def extract_json(text: str) -> str:
|
|
@@ -448,45 +275,153 @@ class LLM:
|
|
| 448 |
# Remove commas before } or ]
|
| 449 |
return re.sub(r',(\s*[}\]])', r'\1', text)
|
| 450 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 451 |
raw = extract_json(response)
|
| 452 |
raw = normalize_quotes(raw)
|
| 453 |
|
| 454 |
-
|
| 455 |
-
|
| 456 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 457 |
# Repair pass
|
| 458 |
repaired = strip_comments(raw)
|
| 459 |
-
repaired = remove_trailing_commas(repaired)
|
| 460 |
-
|
| 461 |
|
| 462 |
-
|
| 463 |
-
|
| 464 |
-
|
| 465 |
-
print(f"Failed to parse JSON: {e}")
|
| 466 |
-
# print('Offending text:', repaired)
|
| 467 |
-
return None
|
| 468 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 469 |
# ===============================================================
|
| 470 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 471 |
"""
|
| 472 |
-
|
|
|
|
| 473 |
"""
|
| 474 |
-
|
| 475 |
-
|
| 476 |
-
|
| 477 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 478 |
|
| 479 |
-
parsed_response = json.loads(json_string)
|
| 480 |
|
| 481 |
-
if not isinstance(parsed_response, dict):
|
| 482 |
-
raise ValueError(f"Parsed response is not a dict: {parsed_response}")
|
| 483 |
|
| 484 |
-
return parsed_response
|
| 485 |
|
| 486 |
-
except ValueError as ve:
|
| 487 |
-
raise ValueError(f"Could not extract JSON from Claude response: {ve}\nOriginal response: {response}")
|
| 488 |
-
except json.JSONDecodeError as je:
|
| 489 |
-
raise ValueError(f"Failed to parse JSON from string: {json_string}\nError: {je}")
|
| 490 |
|
| 491 |
|
| 492 |
|
|
|
|
| 3 |
"""
|
| 4 |
|
| 5 |
import json
|
|
|
|
| 6 |
from openai import OpenAI
|
| 7 |
import openai
|
|
|
|
| 8 |
import re
|
|
|
|
| 9 |
import os
|
|
|
|
| 10 |
from google.genai import types
|
| 11 |
from google import genai
|
| 12 |
+
import logging
|
| 13 |
+
logger = logging.getLogger()
|
| 14 |
|
| 15 |
|
| 16 |
|
|
|
|
| 18 |
def __init__(self, Core):
    """
    Initialise the LLM wrapper: store the shared CoreConfig, reset model
    state, set up fallback bookkeeping, and connect to the backend.

    Args:
        Core: shared CoreConfig carrying config_file, model name, etc.
    """
    self.Core = Core
    self.model = None           # concrete model name, assigned by connect_to_llm()
    self.model_type = "openai"  # backend family; may be switched by connect_to_llm()
    self.client = None

    # failure tracking + cached model lists
    self.failure_counts = {}  # {"gemini-2.5-flash": 2, ...}
    # self.google_models = self.Core.config_file.get("google_models", [])
    # NOTE(review): the config lookup above is commented out and the list is
    # hard-coded — confirm overriding config_file["google_models"] is intentional.
    self.google_models= ["gemini-2.5-flash-lite", "gemini-2.5-flash"]
    self.openai_models = self.Core.config_file.get("openai_models", [])
    # NOTE(review): key is plural ("openai_fallback_models") but the default is
    # a single model name string — verify the config key spelling matches.
    self.openai_fallback_model = self.Core.config_file.get("openai_fallback_models", "gpt-5-nano")
    self.failure_threshold = self.Core.config_file.get("model_failure_threshold", 3)
    self.connect_to_llm()
|
| 32 |
|
| 33 |
def get_credential(self, key):
    """Resolve a credential from the process environment (None when unset)."""
    value = os.environ.get(key)
    return value
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
|
| 36 |
+
def get_response(self, prompt, instructions, validation_mode="message_generation"):
    """
    Route a prompt to the configured LLM backend and return its parsed reply.

    Args:
        prompt: The prompt to send to the LLM.
        instructions: System instructions for the LLM.
        validation_mode: Type of validation to perform on the response:
            - "message_generation": validates header/message keys and character limits
            - "validation_response": validates approved/issues/feedback keys
            - "generic_json": only validates that it's valid JSON, no specific keys

    Returns:
        Parsed JSON response, or None if all retries failed.

    Raises:
        RuntimeError: If ``self.model_type`` is neither "google" nor "openai".
    """
    backend = self.model_type
    if backend == "google":
        return self._get_response_google_with_fallback(prompt, instructions, validation_mode)
    if backend == "openai":
        return self.get_message_openai(prompt, instructions, validation_mode=validation_mode)
    raise RuntimeError(f"Invalid model type : {backend}")
|
| 58 |
|
| 59 |
def connect_to_llm(self):
|
| 60 |
"""
|
|
|
|
| 65 |
if self.Core.model in self.Core.config_file["openai_models"]:
|
| 66 |
self.model_type = "openai"
|
| 67 |
|
|
|
|
|
|
|
|
|
|
| 68 |
elif self.Core.model in self.Core.config_file["google_models"]:
|
| 69 |
self.model_type = "google"
|
| 70 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
self.model = self.Core.model
|
| 72 |
|
| 73 |
+
def _validate_response(self, output: dict, validation_mode: str, attempt: int) -> tuple:
|
|
|
|
|
|
|
|
|
|
| 74 |
"""
|
| 75 |
+
Validate LLM response based on validation mode.
|
| 76 |
|
| 77 |
+
Args:
|
| 78 |
+
output: Parsed JSON output from LLM
|
| 79 |
+
validation_mode: Type of validation to perform
|
| 80 |
+
attempt: Current attempt number (for logging)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
|
| 82 |
+
Returns:
|
| 83 |
+
Tuple of (is_valid: bool, error_message: str or None)
|
| 84 |
+
"""
|
| 85 |
+
if validation_mode == "message_generation":
|
| 86 |
+
# Validate header/message keys and character limits
|
| 87 |
+
if 'message' not in output or 'header' not in output:
|
| 88 |
+
return False, f"'message' or 'header' is missing in response on attempt {attempt + 1}. Retrying..."
|
| 89 |
|
| 90 |
+
header_len = len(output["header"].strip())
|
| 91 |
+
message_len = len(output["message"].strip())
|
| 92 |
+
header_limit = self.Core.config_file["header_limit"]
|
| 93 |
+
message_limit = self.Core.config_file["message_limit"]
|
| 94 |
|
| 95 |
+
if header_len > header_limit or message_len > message_limit:
|
| 96 |
+
return False, f"'header' ({header_len}/{header_limit}) or 'message' ({message_len}/{message_limit}) exceeds character limit on attempt {attempt + 1}. Retrying..."
|
|
|
|
| 97 |
|
| 98 |
+
return True, None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 99 |
|
| 100 |
+
elif validation_mode == "validation_response":
|
| 101 |
+
# Validate approved/issues/feedback keys for SecurityAgent
|
| 102 |
+
if 'approved' not in output:
|
| 103 |
+
return False, f"'approved' key is missing in validation response on attempt {attempt + 1}. Retrying..."
|
| 104 |
+
return True, None
|
| 105 |
|
| 106 |
+
elif validation_mode == "generic_json":
|
| 107 |
+
# No specific key validation, just ensure it's valid JSON (already parsed)
|
| 108 |
+
return True, None
|
| 109 |
|
| 110 |
+
else:
|
| 111 |
+
raise ValueError(f"Unknown validation_mode: {validation_mode}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
|
| 113 |
# =========================================================================
|
| 114 |
+
def get_message_google(self, prompt, instructions, max_retries=6, model_override=None, validation_mode="message_generation"):
|
| 115 |
|
| 116 |
+
client = genai.Client(api_key=self.get_credential("GOOGLE_API_KEY"))
|
| 117 |
+
active_model = model_override or self.Core.model
|
| 118 |
|
| 119 |
for attempt in range(max_retries):
|
| 120 |
try:
|
| 121 |
response = client.models.generate_content(
|
| 122 |
+
model=active_model,
|
| 123 |
contents=prompt,
|
| 124 |
config=types.GenerateContentConfig(
|
| 125 |
thinking_config=types.ThinkingConfig(thinking_budget=0),
|
|
|
|
| 128 |
response_mime_type="application/json"
|
| 129 |
))
|
| 130 |
|
| 131 |
+
# output = json.loads(str(response.text))
|
| 132 |
tokens = {
|
| 133 |
'prompt_tokens': response.usage_metadata.prompt_token_count,
|
| 134 |
'completion_tokens': response.usage_metadata.candidates_token_count,
|
| 135 |
'total_tokens': response.usage_metadata.total_token_count
|
| 136 |
}
|
| 137 |
|
| 138 |
+
# Update token counts
|
| 139 |
self.Core.total_tokens['prompt_tokens'] += tokens['prompt_tokens']
|
| 140 |
self.Core.total_tokens['completion_tokens'] += tokens['completion_tokens']
|
| 141 |
+
self.Core.temp_token_counter += tokens['prompt_tokens']
|
| 142 |
|
| 143 |
output = self.preprocess_and_parse_json(response.text)
|
|
|
|
| 144 |
|
| 145 |
+
# Validate based on mode
|
| 146 |
+
is_valid, error_msg = self._validate_response(output, validation_mode, attempt)
|
| 147 |
+
if not is_valid:
|
| 148 |
+
print(error_msg)
|
| 149 |
+
continue
|
| 150 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 151 |
return output
|
| 152 |
|
| 153 |
except json.JSONDecodeError:
|
|
|
|
| 160 |
|
| 161 |
# =========================================================================
|
| 162 |
|
| 163 |
+
def get_message_openai(self, prompt, instructions, max_retries=5, validation_mode="message_generation"):
|
| 164 |
"""
|
| 165 |
sending the prompt to openai LLM and get back the response
|
| 166 |
"""
|
| 167 |
|
| 168 |
+
openai.api_key = self.get_credential('OPENAI_API_KEY')
|
| 169 |
+
client = OpenAI(api_key=self.get_credential('OPENAI_API_KEY'))
|
| 170 |
|
| 171 |
for attempt in range(max_retries):
|
| 172 |
try:
|
|
|
|
| 200 |
'total_tokens': response.usage.total_tokens
|
| 201 |
}
|
| 202 |
|
| 203 |
+
self.Core.temp_token_counter += tokens['prompt_tokens']
|
|
|
|
|
|
|
|
|
|
| 204 |
|
| 205 |
try:
|
| 206 |
content = response.choices[0].message.content
|
| 207 |
|
| 208 |
# Extract JSON code block
|
|
|
|
| 209 |
output = json.loads(content)
|
| 210 |
|
| 211 |
+
# Validate based on mode
|
| 212 |
+
is_valid, error_msg = self._validate_response(output, validation_mode, attempt)
|
| 213 |
+
if not is_valid:
|
| 214 |
+
print(error_msg)
|
| 215 |
+
continue
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 216 |
|
| 217 |
+
# validating the JSON
|
| 218 |
+
self.Core.total_tokens['prompt_tokens'] += tokens['prompt_tokens']
|
| 219 |
+
self.Core.total_tokens['completion_tokens'] += tokens['completion_tokens']
|
| 220 |
+
self.Core.temp_token_counter += tokens['total_tokens']
|
| 221 |
return output
|
| 222 |
|
| 223 |
except json.JSONDecodeError:
|
|
|
|
| 236 |
print("Max retries exceeded. Returning empty response.")
|
| 237 |
return None
|
| 238 |
|
| 239 |
+
# ===============================================================
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 240 |
|
| 241 |
def preprocess_and_parse_json(self, response: str):
|
| 242 |
"""
|
| 243 |
Remove <think> blocks, extract JSON (from ```json fences or first {...} block),
|
| 244 |
+
parse with a small repair pass, then sanitize string values to avoid double quotes
|
| 245 |
+
inside strings (convert " to ' so serialized output won't need escapes), and also
|
| 246 |
+
strip trailing commas from string values.
|
| 247 |
"""
|
| 248 |
|
| 249 |
def extract_json(text: str) -> str:
|
|
|
|
| 275 |
# Remove commas before } or ]
|
| 276 |
return re.sub(r',(\s*[}\]])', r'\1', text)
|
| 277 |
|
| 278 |
+
# ---- New: value-level sanitizer ----
|
| 279 |
+
def sanitize_strings(obj):
|
| 280 |
+
"""
|
| 281 |
+
Recursively walk obj; for any string value:
|
| 282 |
+
- replace double quotes with single quotes
|
| 283 |
+
- remove trailing commas
|
| 284 |
+
Keys are left unchanged.
|
| 285 |
+
"""
|
| 286 |
+
if isinstance(obj, str):
|
| 287 |
+
# Normalize curly quotes to straight
|
| 288 |
+
s = obj.replace('“', '"').replace('”', '"')
|
| 289 |
+
# Convert any " to '
|
| 290 |
+
if '"' in s:
|
| 291 |
+
s = s.replace('"', "'")
|
| 292 |
+
# Strip trailing commas/spaces
|
| 293 |
+
s = s.rstrip(" ,")
|
| 294 |
+
return s
|
| 295 |
+
elif isinstance(obj, list):
|
| 296 |
+
return [sanitize_strings(v) for v in obj]
|
| 297 |
+
elif isinstance(obj, tuple):
|
| 298 |
+
return tuple(sanitize_strings(v) for v in obj)
|
| 299 |
+
elif isinstance(obj, dict):
|
| 300 |
+
return {k: sanitize_strings(v) for k, v in obj.items()}
|
| 301 |
+
else:
|
| 302 |
+
return obj
|
| 303 |
+
|
| 304 |
raw = extract_json(response)
|
| 305 |
raw = normalize_quotes(raw)
|
| 306 |
|
| 307 |
+
def _try_parse(s: str):
|
| 308 |
+
try:
|
| 309 |
+
return json.loads(s)
|
| 310 |
+
except json.JSONDecodeError:
|
| 311 |
+
return None
|
| 312 |
+
|
| 313 |
+
parsed = _try_parse(raw)
|
| 314 |
+
if parsed is None:
|
| 315 |
# Repair pass
|
| 316 |
repaired = strip_comments(raw)
|
| 317 |
+
repaired = remove_trailing_commas(repaired).strip()
|
| 318 |
+
parsed = _try_parse(repaired)
|
| 319 |
|
| 320 |
+
if parsed is None:
|
| 321 |
+
# Last resort: fail closed
|
| 322 |
+
return None
|
|
|
|
|
|
|
|
|
|
| 323 |
|
| 324 |
+
# Post-parse sanitization: remove double quotes + trailing commas inside string values
|
| 325 |
+
sanitized = sanitize_strings(parsed)
|
| 326 |
+
return sanitized
|
| 327 |
+
|
| 328 |
+
# ===============================================================
|
| 329 |
+
# Helper functions for fall back strategy
|
| 330 |
+
# shifting to other models in case of failure more than expected
|
| 331 |
# ===============================================================
|
| 332 |
+
def _record_success(self, model_name: str):
|
| 333 |
+
self.failure_counts[model_name] = 0
|
| 334 |
+
|
| 335 |
+
# ========================================================
|
| 336 |
+
def _record_failure(self, model_name: str):
|
| 337 |
+
self.failure_counts[model_name] = self.failure_counts.get(model_name, 0) + 1
|
| 338 |
+
|
| 339 |
+
# ========================================================
|
| 340 |
+
def _should_promote(self, model_name: str):
|
| 341 |
+
return self.failure_counts.get(model_name, 0) >= self.failure_threshold
|
| 342 |
+
|
| 343 |
+
# ========================================================
|
| 344 |
+
def _next_google_model(self, current: str):
|
| 345 |
+
if not self.google_models:
|
| 346 |
+
return None
|
| 347 |
+
if current not in self.google_models:
|
| 348 |
+
return self.google_models[0]
|
| 349 |
+
idx = self.google_models.index(current)
|
| 350 |
+
return self.google_models[(idx + 1) % len(self.google_models)]
|
| 351 |
+
|
| 352 |
+
# ========================================================
|
| 353 |
+
def _promote_google_model(self, new_model: str):
|
| 354 |
+
"""Permanently switch default to new_model (Google)."""
|
| 355 |
+
if new_model and new_model in self.google_models:
|
| 356 |
+
self.Core.model = new_model
|
| 357 |
+
self.model = new_model
|
| 358 |
+
self.model_type = "google"
|
| 359 |
+
# reset its counter so we don't immediately bounce again
|
| 360 |
+
self.failure_counts[new_model] = 0
|
| 361 |
+
# (optional) log
|
| 362 |
+
print(f"[LLM] Permanently switched default Google model to: {new_model}")
|
| 363 |
+
#========================================================
|
| 364 |
+
def _get_response_google_with_fallback(self, prompt, instructions, validation_mode="message_generation"):
    """
    Try the current Google model; on failure walk the remaining google_models
    for THIS call only. If the original model crosses the failure threshold,
    permanently promote the model that succeeded. When every Google model
    fails, enable ``Core.openai_fallback_enabled`` and retry once via OpenAI.

    Args:
        prompt: Prompt text to send.
        instructions: System instructions for the LLM.
        validation_mode: Passed through to response validation.

    Returns:
        Parsed JSON output from whichever model succeeded.

    Raises:
        RuntimeError: If no Google models are configured, or when
            ``openai_fallback_enabled`` is neither True nor False (treated as
            "both Google and the OpenAI fallback already failed").
    """
    if self.Core.openai_fallback_enabled is True:
        return self.get_message_openai(prompt, instructions, validation_mode=validation_mode)

    elif self.Core.openai_fallback_enabled is False:
        if not self.google_models:
            raise RuntimeError("No Google models configured.")

        current = self.Core.model if self.Core.model in self.google_models else self.google_models[0]

        # Build per-request trial order: current first, then the rest in ring order.
        start_idx = self.google_models.index(current)
        trial_order = self.google_models[start_idx:] + self.google_models[:start_idx]

        original_model = current

        for idx, model_name in enumerate(trial_order):
            output = self.get_message_google(
                prompt,
                instructions,
                max_retries=self.Core.config_file.get("per_model_retries", 6),
                model_override=model_name,
                validation_mode=validation_mode
            )

            if output is not None:
                # success path
                self._record_success(model_name)

                # Having to fall back away from the original model counts as a
                # failure for it; promote the working model once the original
                # crosses the threshold.
                if idx > 0:
                    self._record_failure(original_model)
                    if self._should_promote(original_model):
                        self._promote_google_model(model_name)

                return output

            # This model failed even after its own retries.
            self._record_failure(model_name)
            logger.info(f"Google model '{model_name}' failed after retries.")

        # All Google models failed --> fall back to the OpenAI model.
        self.Core.openai_fallback_enabled = True
        self.Core.model = self.openai_fallback_model
        self.Core.reasoning_model = True  # fallback model is a reasoning model --> gpt5
        logger.info(" ❌ [LLM] All Google models failed; attempting OpenAI fallback.")
        # BUGFIX: removed a stray no-arg print() that emitted a blank line here.
        return self._get_response_google_with_fallback(prompt, instructions, validation_mode)
    else:
        # openai_fallback_enabled is neither True nor False (e.g. None): nothing worked.
        logger.info(" ❌ All Google models and Openai fallback failed.")
        raise RuntimeError("All Google models and Openai fallback failed.")
|
| 421 |
|
|
|
|
| 422 |
|
|
|
|
|
|
|
| 423 |
|
|
|
|
| 424 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 425 |
|
| 426 |
|
| 427 |
|
{Messaging_system → ai_messaging_system_v2/Messaging_system}/LLMR.py
RENAMED
|
@@ -3,17 +3,16 @@ This class is a LLM based recommender that can choose the perfect content for th
|
|
| 3 |
|
| 4 |
"""
|
| 5 |
import json
|
| 6 |
-
import os
|
| 7 |
-
import random
|
| 8 |
-
|
| 9 |
import pandas as pd
|
| 10 |
import openai
|
| 11 |
from openai import OpenAI
|
| 12 |
from dotenv import load_dotenv
|
| 13 |
import time
|
| 14 |
-
import streamlit as st
|
| 15 |
from tqdm import tqdm
|
| 16 |
-
from
|
|
|
|
|
|
|
|
|
|
| 17 |
load_dotenv()
|
| 18 |
|
| 19 |
|
|
@@ -27,7 +26,7 @@ class LLMR:
|
|
| 27 |
self.selected_content_ids = [] # will be populated for each user
|
| 28 |
self.random=random
|
| 29 |
|
| 30 |
-
def get_recommendations(self, progress_callback):
|
| 31 |
"""
|
| 32 |
selecting the recommended content for each user
|
| 33 |
:return:
|
|
@@ -38,7 +37,7 @@ class LLMR:
|
|
| 38 |
self.Core.users_df["recommendation_info"] = None
|
| 39 |
total_users = len(self.Core.users_df)
|
| 40 |
|
| 41 |
-
|
| 42 |
|
| 43 |
self.Core.start_time = time.time()
|
| 44 |
for progress, (idx, row) in enumerate(
|
|
@@ -77,6 +76,10 @@ class LLMR:
|
|
| 77 |
:return: content_id
|
| 78 |
"""
|
| 79 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
if self.random: # select recommendations randomly from top options
|
| 81 |
return self._get_recommendation_random()
|
| 82 |
|
|
@@ -401,62 +404,122 @@ You are a helpful educational music content recommender. Your goal is to choose
|
|
| 401 |
# ==========================================================================
|
| 402 |
# Randomly select recommendations from top options
|
| 403 |
# ==========================================================================
|
| 404 |
-
# main random selector ---
|
| 405 |
def _get_recommendation_random(self):
|
| 406 |
"""
|
| 407 |
-
Randomly pick ONE item from the top-5 of each requested section.
|
|
|
|
| 408 |
Also remove the picked item from every section in recsys_json.
|
| 409 |
-
Returns: (
|
| 410 |
"""
|
| 411 |
-
|
|
|
|
|
|
|
| 412 |
recsys_json = self._get_user_recommendation()
|
| 413 |
try:
|
| 414 |
recsys_data = json.loads(recsys_json) if recsys_json else {}
|
| 415 |
except Exception:
|
| 416 |
recsys_data = {}
|
| 417 |
|
| 418 |
-
# 2) Build candidate pool (top 5 per section)
|
| 419 |
sections = self.Core.recsys_contents
|
| 420 |
-
|
|
|
|
|
|
|
| 421 |
|
| 422 |
# 3) Cold start or empty? -> use popular contents
|
| 423 |
-
|
|
|
|
| 424 |
recsys_data = self._get_popular_fallback_json(k=5)
|
| 425 |
-
|
|
|
|
| 426 |
|
| 427 |
# Still nothing? bail out
|
| 428 |
-
if not
|
| 429 |
return None, None, None, None
|
| 430 |
|
| 431 |
-
# 4)
|
| 432 |
-
|
| 433 |
-
|
| 434 |
-
for rec in candidates:
|
| 435 |
-
cid = rec.get("content_id")
|
| 436 |
-
if cid not in seen:
|
| 437 |
-
seen.add(cid)
|
| 438 |
-
unique_candidates.append(rec)
|
| 439 |
|
| 440 |
-
|
| 441 |
-
picked_id = picked_rec["content_id"]
|
| 442 |
-
recommendation_dict = self._get_recommendation_info(picked_id, recsys_data)
|
| 443 |
|
| 444 |
-
# 5)
|
| 445 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 446 |
|
|
|
|
|
|
|
| 447 |
|
| 448 |
-
#
|
| 449 |
-
self.selected_content_ids = [r["content_id"] for r in unique_candidates]
|
| 450 |
|
| 451 |
-
#
|
| 452 |
-
content_info = self._get_content_info(picked_id)
|
| 453 |
updated_json = json.dumps(recsys_data)
|
| 454 |
zero_tokens = {"prompt_tokens": 0, "completion_tokens": 0}
|
| 455 |
|
| 456 |
return recommendation_dict, content_info, updated_json, zero_tokens
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 457 |
#======================================================================
|
| 458 |
# helpers used by the random path
|
| 459 |
#======================================================================
|
|
|
|
| 460 |
def _get_recommendation_info(self, content_id, recsys_data):
|
| 461 |
# Search through all categories in the recsys data
|
| 462 |
found_item=None
|
|
@@ -536,3 +599,94 @@ You are a helpful educational music content recommender. Your goal is to choose
|
|
| 536 |
recsys_data[sec] = [r for r in recs if r.get("content_id") != content_id]
|
| 537 |
return recsys_data
|
| 538 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
|
| 4 |
"""
|
| 5 |
import json
|
|
|
|
|
|
|
|
|
|
| 6 |
import pandas as pd
|
| 7 |
import openai
|
| 8 |
from openai import OpenAI
|
| 9 |
from dotenv import load_dotenv
|
| 10 |
import time
|
|
|
|
| 11 |
from tqdm import tqdm
|
| 12 |
+
from .Homepage_Recommender import DefaultRec
|
| 13 |
+
import logging
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger()
|
| 16 |
load_dotenv()
|
| 17 |
|
| 18 |
|
|
|
|
| 26 |
self.selected_content_ids = [] # will be populated for each user
|
| 27 |
self.random=random
|
| 28 |
|
| 29 |
+
def get_recommendations(self, progress_callback=None):
|
| 30 |
"""
|
| 31 |
selecting the recommended content for each user
|
| 32 |
:return:
|
|
|
|
| 37 |
self.Core.users_df["recommendation_info"] = None
|
| 38 |
total_users = len(self.Core.users_df)
|
| 39 |
|
| 40 |
+
logger.info("🎯 Choosing the best content to recommend ...")
|
| 41 |
|
| 42 |
self.Core.start_time = time.time()
|
| 43 |
for progress, (idx, row) in enumerate(
|
|
|
|
| 76 |
:return: content_id
|
| 77 |
"""
|
| 78 |
|
| 79 |
+
# NEW: Check if specific_content_id is set - if so, use it for all users
|
| 80 |
+
if self.Core.specific_content_id is not None:
|
| 81 |
+
return self._get_specific_content()
|
| 82 |
+
|
| 83 |
if self.random: # select recommendations randomly from top options
|
| 84 |
return self._get_recommendation_random()
|
| 85 |
|
|
|
|
| 404 |
# ==========================================================================
|
| 405 |
# Randomly select recommendations from top options
|
| 406 |
# ==========================================================================
|
|
|
|
| 407 |
def _get_recommendation_random(self):
|
| 408 |
"""
|
| 409 |
+
Randomly pick ONE valid item from the top-5 of each requested section.
|
| 410 |
+
If the first random pick is missing/invalid, keep trying other candidates.
|
| 411 |
Also remove the picked item from every section in recsys_json.
|
| 412 |
+
Returns: (recommendation_dict, content_info, updated_recsys_json, zero_tokens_dict)
|
| 413 |
"""
|
| 414 |
+
import json, random
|
| 415 |
+
|
| 416 |
+
# 1) Get user's recsys_result or fall back to {}
|
| 417 |
recsys_json = self._get_user_recommendation()
|
| 418 |
try:
|
| 419 |
recsys_data = json.loads(recsys_json) if recsys_json else {}
|
| 420 |
except Exception:
|
| 421 |
recsys_data = {}
|
| 422 |
|
|
|
|
| 423 |
sections = self.Core.recsys_contents
|
| 424 |
+
|
| 425 |
+
# 2) Primary candidate set
|
| 426 |
+
unique_candidates = self.build_unique_candidates(recsys_data, sections)
|
| 427 |
|
| 428 |
# 3) Cold start or empty? -> use popular contents
|
| 429 |
+
used_popular_fallback = False
|
| 430 |
+
if not unique_candidates:
|
| 431 |
recsys_data = self._get_popular_fallback_json(k=5)
|
| 432 |
+
unique_candidates = self.build_unique_candidates(recsys_data, sections)
|
| 433 |
+
used_popular_fallback = True
|
| 434 |
|
| 435 |
# Still nothing? bail out
|
| 436 |
+
if not unique_candidates:
|
| 437 |
return None, None, None, None
|
| 438 |
|
| 439 |
+
# 4) Try candidates in random order until a valid one is found
|
| 440 |
+
idxs = list(range(len(unique_candidates)))
|
| 441 |
+
random.shuffle(idxs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 442 |
|
| 443 |
+
picked_id, recommendation_dict, content_info = self.try_pick_from_candidates(idxs, unique_candidates, recsys_data)
|
|
|
|
|
|
|
| 444 |
|
| 445 |
+
# 5) If nothing valid from primary set, and we haven't tried popular fallback yet, try it now
|
| 446 |
+
if picked_id is None and not used_popular_fallback:
|
| 447 |
+
recsys_data = self._get_popular_fallback_json(k=5)
|
| 448 |
+
unique_candidates = self.build_unique_candidates(recsys_data, sections)
|
| 449 |
+
if unique_candidates:
|
| 450 |
+
idxs = list(range(len(unique_candidates)))
|
| 451 |
+
random.shuffle(idxs)
|
| 452 |
+
picked_id, recommendation_dict, content_info = self.try_pick_from_candidates(idxs, unique_candidates, recsys_data)
|
| 453 |
+
|
| 454 |
+
# 6) If still nothing, bail out
|
| 455 |
+
if picked_id is None:
|
| 456 |
+
return None, None, None, None
|
| 457 |
|
| 458 |
+
# 7) Remove picked_id from ALL sections and store back
|
| 459 |
+
recsys_data = self._remove_selected_from_all(recsys_data, picked_id)
|
| 460 |
|
| 461 |
+
# 8) Track available ids if you still need it elsewhere
|
| 462 |
+
self.selected_content_ids = [r["content_id"] for r in unique_candidates if r.get("content_id")]
|
| 463 |
|
| 464 |
+
# 9) Prepare return values
|
|
|
|
| 465 |
updated_json = json.dumps(recsys_data)
|
| 466 |
zero_tokens = {"prompt_tokens": 0, "completion_tokens": 0}
|
| 467 |
|
| 468 |
return recommendation_dict, content_info, updated_json, zero_tokens
|
| 469 |
+
|
| 470 |
+
# ====================================================================
|
| 471 |
+
def build_unique_candidates(self, src_data, sections):
    """Collect the top-5 records per section and dedupe them by content_id.

    Records with a falsy/missing content_id are dropped; the first record
    seen for each id wins and the original ordering is preserved.
    """
    candidates = self._collect_top_k(src_data, sections, k=5)
    seen_ids = set()
    unique = []
    for record in candidates or []:
        content_id = record.get("content_id")
        if not content_id or content_id in seen_ids:
            continue
        seen_ids.add(content_id)
        unique.append(record)
    return unique
|
| 481 |
+
|
| 482 |
+
# ======================================================================
|
| 483 |
+
def try_pick_from_candidates(self, idxs, candidates, source_data):
    """
    Walk ``candidates`` in the (pre-shuffled) order given by ``idxs`` and
    return the first pick whose info payloads can both be fetched.

    Returns:
        (picked_id, recommendation_dict, content_info) on success,
        (None, None, None) when no candidate is usable.
    """
    # Set lookup keeps the banned-content check O(1) per candidate.
    banned = set(self.Core.config_file.get("banned_contents", []))

    for position in idxs:
        candidate_id = candidates[position].get("content_id")
        # Skip records without an id and anything on the ban list.
        if not candidate_id or candidate_id in banned:
            continue
        try:
            info = self._get_content_info(candidate_id)
            if not info:
                # Empty/falsy payloads are treated as invalid; keep searching.
                continue
            recommendation = self._get_recommendation_info(candidate_id, source_data)
        except Exception:
            # Malformed candidate data (IndexError/KeyError/...): try the next one.
            continue
        return candidate_id, recommendation, info
    return None, None, None
|
| 518 |
+
|
| 519 |
#======================================================================
|
| 520 |
# helpers used by the random path
|
| 521 |
#======================================================================
|
| 522 |
+
# =====================================================================
|
| 523 |
def _get_recommendation_info(self, content_id, recsys_data):
|
| 524 |
# Search through all categories in the recsys data
|
| 525 |
found_item=None
|
|
|
|
| 599 |
recsys_data[sec] = [r for r in recs if r.get("content_id") != content_id]
|
| 600 |
return recsys_data
|
| 601 |
|
| 602 |
+
# =====================================================================
|
| 603 |
+
def _get_specific_content(self):
    """
    Resolve the operator-pinned content (``Core.specific_content_id``) that is
    used for ALL users, bypassing the AI recommendation system.

    Returns:
        tuple: (recommendation_dict, content_info, recsys_json, zero_tokens)

    Raises:
        ValueError: If the content id is absent from the content database, or
            its row is missing a required field.
    """
    import json

    content_id = self.Core.specific_content_id

    # Look the pinned content up in the brand's content table.
    match = self.Core.content_info[self.Core.content_info['content_id'] == content_id]
    if match.empty:
        error_msg = f"❌ ERROR: specific_content_id {content_id} not found in content database for brand '{self.Core.brand}'"
        logger.error(error_msg)
        raise ValueError(error_msg)

    try:
        row = match.iloc[0]
        content_info = row['content_info']
        web_url_path = row['web_url_path']
        thumbnail_url = row['thumbnail_url']
        title = row['content_title']  # Map content_title to title

        # Build the recommendation dict in the same shape the normal path uses.
        recommendation_dict = {
            "content_id": content_id,
            "web_url_path": web_url_path,
            "title": title,
            "thumbnail_url": thumbnail_url
        }

        # Mirror the normal recsys payload for downstream consistency
        # (even though it is identical for every user).
        recsys_json = json.dumps({
            "specific_content": [{
                "content_id": content_id,
                "web_url_path": web_url_path,
                "title": title,
                "thumbnail_url": thumbnail_url,
                "recommendation_rank": 1
            }]
        })

        # No tokens used since we're not calling the LLM.
        zero_tokens = {"prompt_tokens": 0, "completion_tokens": 0}

        logger.info(f"✅ Using specific content {content_id} for all users: {title}")

        return recommendation_dict, content_info, recsys_json, zero_tokens

    except KeyError as e:
        error_msg = f"❌ ERROR: Missing required field in content database for content_id {content_id}: {str(e)}"
        logger.error(error_msg)
        raise ValueError(error_msg)
    except IndexError as e:
        error_msg = f"❌ ERROR: Unable to extract content data for content_id {content_id}: {str(e)}"
        logger.error(error_msg)
        raise ValueError(error_msg)
|
| 668 |
+
|
| 669 |
+
# =====================================================================
|
| 670 |
+
def get_followup_recommendation(self):
    """
    Assign a follow-up recommendation to every user in ``Core.users_df``,
    considering the currently available contents.

    Users for whom no content could be selected fall back to the brand's
    default recommendation (``DefaultRec``).

    Returns:
        The updated Core configuration object.
    """
    default = DefaultRec(self.Core)
    for idx, row in self.Core.users_df.iterrows():
        self.user = row
        recommendation_dict, content_info, recsys_json, _ = self._get_recommendation()

        # BUGFIX: _get_recommendation can return (None, None, None, None);
        # indexing None with ["content_id"] previously raised TypeError.
        if recommendation_dict is None or recommendation_dict.get("content_id") is None:
            # Error in selecting a content to recommend -> use the default.
            self.Core.users_df.at[idx, "recommendation"] = default.recommendation
            self.Core.users_df.at[idx, "recommendation_info"] = default.recommendation_info
            self.Core.users_df.at[idx, "recsys_result"] = default.for_you_url
        else:
            self.Core.users_df.at[idx, "recommendation"] = recommendation_dict
            self.Core.users_df.at[idx, "recommendation_info"] = content_info
            self.Core.users_df.at[idx, "recsys_result"] = recsys_json

    return self.Core
+
|
| 692 |
+
|
{Messaging_system → ai_messaging_system_v2/Messaging_system}/Message_generator.py
RENAMED
|
@@ -1,16 +1,16 @@
|
|
| 1 |
"""
|
| 2 |
-
|
|
|
|
| 3 |
"""
|
| 4 |
import json
|
| 5 |
import time
|
| 6 |
from openai import OpenAI
|
| 7 |
from tqdm import tqdm
|
| 8 |
-
import streamlit as st
|
| 9 |
-
|
| 10 |
-
from Messaging_system.MultiMessage import MultiMessage
|
| 11 |
-
from Messaging_system.protection_layer import ProtectionLayer
|
| 12 |
import openai
|
| 13 |
-
from
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
|
| 16 |
class MessageGenerator:
|
|
@@ -18,84 +18,206 @@ class MessageGenerator:
|
|
| 18 |
def __init__(self, CoreConfig):
|
| 19 |
self.Core = CoreConfig
|
| 20 |
self.llm = LLM(CoreConfig)
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
# =================================================================
|
| 23 |
-
|
| 24 |
-
|
|
|
|
|
|
|
| 25 |
"""
|
| 26 |
-
|
| 27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
total_users = len(self.Core.users_df)
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
self.Core.start_time = time.time()
|
| 34 |
-
|
| 35 |
-
|
| 36 |
# Update progress if callback is provided
|
| 37 |
if progress_callback is not None:
|
| 38 |
progress_callback(progress, total_users)
|
| 39 |
|
| 40 |
-
|
| 41 |
-
|
| 42 |
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
message = first_message
|
| 48 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
# self.Core.total_tokens['completion_tokens'] += total_tokens['completion_tokens']
|
| 53 |
-
# self.Core.temp_token_counter += total_tokens['prompt_tokens'] + total_tokens['completion_tokens']
|
| 54 |
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
message =
|
| 59 |
-
|
| 60 |
-
row["message"] = message
|
| 61 |
-
else:
|
| 62 |
self.Core.users_df.at[idx, "message"] = None
|
| 63 |
-
self.Core.checkpoint()
|
| 64 |
-
self.Core.respect_request_ratio()
|
| 65 |
else:
|
| 66 |
self.Core.users_df.at[idx, "message"] = None
|
|
|
|
|
|
|
|
|
|
| 67 |
|
| 68 |
-
#
|
| 69 |
-
|
| 70 |
-
self.Core.users_df.at[idx, "message"] is not None and row["message"] is not None:
|
| 71 |
-
MM = MultiMessage(self.Core)
|
| 72 |
-
message = MM.generate_multi_messages(row)
|
| 73 |
-
self.Core.users_df.at[idx, "message"] = message
|
| 74 |
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
try:
|
| 84 |
-
|
|
|
|
| 85 |
except json.JSONDecodeError:
|
| 86 |
-
|
| 87 |
-
pass
|
| 88 |
-
|
| 89 |
-
msg_wrapper = {"messages_sequence": [single_msg]}
|
| 90 |
-
# Again, store a proper JSON string
|
| 91 |
-
self.Core.users_df.at[idx, "message"] = json.dumps(msg_wrapper,
|
| 92 |
-
ensure_ascii=False)
|
| 93 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
else:
|
| 95 |
self.Core.users_df.at[idx, "message"] = None
|
| 96 |
|
| 97 |
return self.Core
|
| 98 |
-
|
| 99 |
# --------------------------------------------------------------
|
| 100 |
# --------------------------------------------------------------
|
| 101 |
def parsing_output_message(self, message, user):
|
|
@@ -136,24 +258,31 @@ class MessageGenerator:
|
|
| 136 |
def fetch_recommendation_data(self, user, message):
|
| 137 |
|
| 138 |
if user["recommendation"] == "for_you":
|
|
|
|
|
|
|
|
|
|
| 139 |
output_message = {
|
| 140 |
"header": message.get("header"),
|
| 141 |
"message": message.get("message"),
|
| 142 |
"content_id": None,
|
| 143 |
-
"web_url_path":
|
| 144 |
"title": user["recommendation"],
|
| 145 |
-
"thumbnail_url": None
|
|
|
|
| 146 |
}
|
| 147 |
else:
|
| 148 |
recommendation_dict = user["recommendation"]
|
| 149 |
content_id = int(recommendation_dict["content_id"])
|
| 150 |
|
| 151 |
-
# Extract required fields
|
| 152 |
web_url_path = recommendation_dict["web_url_path"]
|
| 153 |
title = recommendation_dict["title"]
|
| 154 |
thumbnail_url = recommendation_dict["thumbnail_url"]
|
| 155 |
|
| 156 |
-
message
|
|
|
|
|
|
|
|
|
|
| 157 |
|
| 158 |
# Add these to the message dict
|
| 159 |
output_message = {
|
|
@@ -162,7 +291,8 @@ class MessageGenerator:
|
|
| 162 |
"content_id": content_id,
|
| 163 |
"web_url_path": web_url_path,
|
| 164 |
"title": title,
|
| 165 |
-
"thumbnail_url": thumbnail_url
|
|
|
|
| 166 |
}
|
| 167 |
return output_message
|
| 168 |
|
|
@@ -174,38 +304,25 @@ class MessageGenerator:
|
|
| 174 |
:return: instructions as string
|
| 175 |
"""
|
| 176 |
|
| 177 |
-
|
| 178 |
-
|
| 179 |
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
|
|
|
|
|
|
|
|
|
| 183 |
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
instructions += f"""
|
| 187 |
-
Here are some examples of things that an instructor would realistically say to a student, to give you a general sense of tone and phrasing:
|
| 188 |
-
|
| 189 |
-
Common instructor phrases:
|
| 190 |
-
{self.Core.brand_voice}
|
| 191 |
-
"""
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
rules = f"""
|
| 195 |
-
ABSOLUTE RULE – OVERRIDES EVERYTHING ELSE:
|
| 196 |
-
the header and the message **MUST NOT** contain any banned word or phrases(case-insensitive; singular, plural, verb forms, or their derivatives)
|
| 197 |
-
- **important Note:** header **must be** less than {self.Core.config_file["header_limit"]} characters and message **must be less** than {self.Core.config_file["message_limit"]} characters.
|
| 198 |
-
|
| 199 |
-
Banned word:
|
| 200 |
{jargon_list}
|
| 201 |
-
|
| 202 |
"""
|
| 203 |
-
if banned_phrases is not None:
|
| 204 |
-
rules += banned_phrases
|
| 205 |
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
{
|
| 209 |
-
|
|
|
|
| 210 |
|
| 211 |
-
return
|
|
|
|
| 1 |
"""
|
| 2 |
+
This class will generate message or messages based on the number of requested.
|
| 3 |
+
Now uses agentic workflow for enhanced quality control.
|
| 4 |
"""
|
| 5 |
import json
|
| 6 |
import time
|
| 7 |
from openai import OpenAI
|
| 8 |
from tqdm import tqdm
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
import openai
|
| 10 |
+
from .LLM import LLM
|
| 11 |
+
from .agents import AgentOrchestrator, RejectionLogger
|
| 12 |
+
import logging
|
| 13 |
+
logger = logging.getLogger()
|
| 14 |
|
| 15 |
|
| 16 |
class MessageGenerator:
|
|
|
|
| 18 |
def __init__(self, CoreConfig):
|
| 19 |
self.Core = CoreConfig
|
| 20 |
self.llm = LLM(CoreConfig)
|
| 21 |
+
self.use_agentic_workflow = True # Enable agentic workflow by default
|
| 22 |
+
self.agent_orchestrator = None
|
| 23 |
+
self.rejection_logger = None
|
| 24 |
|
| 25 |
# =================================================================
|
| 26 |
+
def structured_output(self, msg, step):
|
| 27 |
+
"""
|
| 28 |
+
creating final structured JSON output to store for the given step
|
| 29 |
+
:return:
|
| 30 |
"""
|
| 31 |
+
|
| 32 |
+
if msg is not None:
|
| 33 |
+
# If it's still a JSON string, turn it into a dict
|
| 34 |
+
if isinstance(msg, str):
|
| 35 |
+
try:
|
| 36 |
+
msg = json.loads(msg)
|
| 37 |
+
except json.JSONDecodeError:
|
| 38 |
+
raise ValueError(f"output is not a JSON string: \n {msg}") # Leave as-is if not valid JSON
|
| 39 |
+
|
| 40 |
+
# Build the new wrapper with step integer key
|
| 41 |
+
msg_wrapper = {
|
| 42 |
+
str(step): { # convert to str so it's valid JSON key
|
| 43 |
+
"header": msg.get("header"),
|
| 44 |
+
"message": msg.get("message"),
|
| 45 |
+
"content_id": msg.get("content_id"),
|
| 46 |
+
"web_url_path": msg.get("web_url_path"),
|
| 47 |
+
"title": msg.get("title"),
|
| 48 |
+
"thumbnail_url": msg.get("thumbnail_url"),
|
| 49 |
+
"deeplink" : msg.get("deeplink")
|
| 50 |
+
}
|
| 51 |
+
}
|
| 52 |
+
return msg_wrapper
|
| 53 |
+
else:
|
| 54 |
+
return None
|
| 55 |
+
# ==================================================================
|
| 56 |
+
|
| 57 |
+
def generate_messages(self, step=1, progress_callback=None):
|
| 58 |
"""
|
| 59 |
+
Generate messages using the agentic workflow (new default) or legacy method.
|
| 60 |
+
|
| 61 |
+
Args:
|
| 62 |
+
step: Campaign stage number
|
| 63 |
+
progress_callback: Optional progress callback function
|
| 64 |
+
|
| 65 |
+
Returns:
|
| 66 |
+
Updated CoreConfig instance
|
| 67 |
+
"""
|
| 68 |
+
if self.use_agentic_workflow:
|
| 69 |
+
return self.generate_messages_with_agents(step, progress_callback)
|
| 70 |
+
else:
|
| 71 |
+
return self.generate_messages_legacy(step, progress_callback)
|
| 72 |
+
|
| 73 |
+
def generate_messages_with_agents(self, step=1, progress_callback=None):
|
| 74 |
+
"""
|
| 75 |
+
Generate messages using the agentic workflow with GeneratorAgent and SecurityAgent.
|
| 76 |
+
|
| 77 |
+
Args:
|
| 78 |
+
step: Campaign stage number
|
| 79 |
+
progress_callback: Optional progress callback function
|
| 80 |
+
|
| 81 |
+
Returns:
|
| 82 |
+
Updated CoreConfig instance
|
| 83 |
+
"""
|
| 84 |
+
# CRITICAL: Deduplicate users_df by user_id before processing
|
| 85 |
+
# This ensures we only generate ONE message per user, not multiple messages for duplicate rows
|
| 86 |
+
initial_count = len(self.Core.users_df)
|
| 87 |
+
user_id_col = 'user_id' if 'user_id' in self.Core.users_df.columns else 'USER_ID'
|
| 88 |
+
|
| 89 |
+
if user_id_col in self.Core.users_df.columns:
|
| 90 |
+
# Keep the first occurrence of each user_id
|
| 91 |
+
self.Core.users_df = self.Core.users_df.drop_duplicates(subset=[user_id_col], keep='first')
|
| 92 |
+
deduped_count = len(self.Core.users_df)
|
| 93 |
+
|
| 94 |
+
if initial_count > deduped_count:
|
| 95 |
+
logger.warning(f"⚠️ Removed {initial_count - deduped_count} duplicate user rows before message generation")
|
| 96 |
+
print(f"⚠️ Removed {initial_count - deduped_count} duplicate user rows before message generation")
|
| 97 |
|
| 98 |
total_users = len(self.Core.users_df)
|
| 99 |
+
logger.info(f"⏳ Generating messages for {total_users} users using agentic workflow")
|
| 100 |
+
|
| 101 |
+
# Initialize rejection logger
|
| 102 |
+
campaign_name = getattr(self.Core, 'campaign_name', 'unknown_campaign')
|
| 103 |
+
self.rejection_logger = RejectionLogger(
|
| 104 |
+
campaign_name=campaign_name,
|
| 105 |
+
brand=self.Core.brand,
|
| 106 |
+
stage=step
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
# Initialize agent orchestrator
|
| 110 |
+
self.agent_orchestrator = AgentOrchestrator(
|
| 111 |
+
core_config=self.Core,
|
| 112 |
+
rejection_logger=self.rejection_logger
|
| 113 |
+
)
|
| 114 |
|
| 115 |
self.Core.start_time = time.time()
|
| 116 |
+
|
| 117 |
+
for progress, (idx, row) in enumerate(tqdm(self.Core.users_df.iterrows(), desc="generating messages (agentic)")):
|
| 118 |
# Update progress if callback is provided
|
| 119 |
if progress_callback is not None:
|
| 120 |
progress_callback(progress, total_users)
|
| 121 |
|
| 122 |
+
# Use agent orchestrator to generate and validate
|
| 123 |
+
result = self.agent_orchestrator.generate_and_validate_message(row, step)
|
| 124 |
|
| 125 |
+
if result is not None:
|
| 126 |
+
# Message was approved
|
| 127 |
+
header = result["header"]
|
| 128 |
+
message_text = result["message"]
|
|
|
|
| 129 |
|
| 130 |
+
# Create message dict for parsing
|
| 131 |
+
message_dict = {
|
| 132 |
+
"header": header,
|
| 133 |
+
"message": message_text
|
| 134 |
+
}
|
| 135 |
|
| 136 |
+
# Parse and enrich the message
|
| 137 |
+
parsed_message = self.parsing_output_message(message_dict, row)
|
|
|
|
|
|
|
| 138 |
|
| 139 |
+
if parsed_message:
|
| 140 |
+
final_message = self.structured_output(parsed_message, step)
|
| 141 |
+
try:
|
| 142 |
+
self.Core.users_df.at[idx, "message"] = json.dumps(final_message, ensure_ascii=False)
|
| 143 |
+
except json.JSONDecodeError:
|
|
|
|
|
|
|
| 144 |
self.Core.users_df.at[idx, "message"] = None
|
|
|
|
|
|
|
| 145 |
else:
|
| 146 |
self.Core.users_df.at[idx, "message"] = None
|
| 147 |
+
else:
|
| 148 |
+
# Failed after all attempts
|
| 149 |
+
self.Core.users_df.at[idx, "message"] = None
|
| 150 |
|
| 151 |
+
# Respect rate limits
|
| 152 |
+
self.Core.respect_request_ratio()
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
|
| 154 |
+
# Log rejection statistics
|
| 155 |
+
if self.rejection_logger:
|
| 156 |
+
stats = self.rejection_logger.get_rejection_stats()
|
| 157 |
+
logger.info(f"Rejection stats: {stats}")
|
| 158 |
+
logger.info(f"Rejection log saved to: {self.rejection_logger.get_log_path()}")
|
| 159 |
+
|
| 160 |
+
return self.Core
|
| 161 |
+
|
| 162 |
+
def generate_messages_legacy(self, step=1, progress_callback=None):
|
| 163 |
+
"""
|
| 164 |
+
Legacy message generation method (original implementation).
|
| 165 |
+
|
| 166 |
+
Args:
|
| 167 |
+
step: Campaign stage number
|
| 168 |
+
progress_callback: Optional progress callback function
|
| 169 |
+
|
| 170 |
+
Returns:
|
| 171 |
+
Updated CoreConfig instance
|
| 172 |
+
"""
|
| 173 |
+
# CRITICAL: Deduplicate users_df by user_id before processing
|
| 174 |
+
# This ensures we only generate ONE message per user, not multiple messages for duplicate rows
|
| 175 |
+
initial_count = len(self.Core.users_df)
|
| 176 |
+
user_id_col = 'user_id' if 'user_id' in self.Core.users_df.columns else 'USER_ID'
|
| 177 |
+
|
| 178 |
+
if user_id_col in self.Core.users_df.columns:
|
| 179 |
+
# Keep the first occurrence of each user_id
|
| 180 |
+
self.Core.users_df = self.Core.users_df.drop_duplicates(subset=[user_id_col], keep='first')
|
| 181 |
+
deduped_count = len(self.Core.users_df)
|
| 182 |
+
|
| 183 |
+
if initial_count > deduped_count:
|
| 184 |
+
logger.warning(f"⚠️ Removed {initial_count - deduped_count} duplicate user rows before message generation")
|
| 185 |
+
print(f"⚠️ Removed {initial_count - deduped_count} duplicate user rows before message generation")
|
| 186 |
+
|
| 187 |
+
total_users = len(self.Core.users_df)
|
| 188 |
+
logger.info("⏳ generating messages for {} users (legacy mode)".format(total_users))
|
| 189 |
+
|
| 190 |
+
self.Core.start_time = time.time()
|
| 191 |
+
for progress, (idx, row) in enumerate(tqdm(self.Core.users_df.iterrows(), desc="generating messages")):
|
| 192 |
+
# if we have a prompt to generate a personalized message
|
| 193 |
+
# Update progress if callback is provided
|
| 194 |
+
if progress_callback is not None:
|
| 195 |
+
progress_callback(progress, total_users)
|
| 196 |
+
|
| 197 |
+
if row["prompt"] is not None:
|
| 198 |
+
message = self.llm.get_response(prompt=row["prompt"], instructions=self.llm_instructions())
|
| 199 |
+
|
| 200 |
+
if message is not None:
|
| 201 |
+
|
| 202 |
+
# double check output structure
|
| 203 |
+
if isinstance(message, dict) and "message" in message and isinstance(message["message"], str):
|
| 204 |
+
# parsing output result
|
| 205 |
+
message = self.parsing_output_message(message, row)
|
| 206 |
+
final_message = self.structured_output(message, step)
|
| 207 |
try:
|
| 208 |
+
self.Core.users_df.at[idx, "message"] = json.dumps(final_message, ensure_ascii=False)
|
| 209 |
+
row["message"] = final_message
|
| 210 |
except json.JSONDecodeError:
|
| 211 |
+
self.Core.users_df.at[idx, "message"] = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 212 |
|
| 213 |
+
else:
|
| 214 |
+
self.Core.users_df.at[idx, "message"] = None
|
| 215 |
+
# self.Core.checkpoint()
|
| 216 |
+
self.Core.respect_request_ratio()
|
| 217 |
else:
|
| 218 |
self.Core.users_df.at[idx, "message"] = None
|
| 219 |
|
| 220 |
return self.Core
|
|
|
|
| 221 |
# --------------------------------------------------------------
|
| 222 |
# --------------------------------------------------------------
|
| 223 |
def parsing_output_message(self, message, user):
|
|
|
|
| 258 |
def fetch_recommendation_data(self, user, message):
|
| 259 |
|
| 260 |
if user["recommendation"] == "for_you":
|
| 261 |
+
web_url_path = user["recsys_result"]
|
| 262 |
+
deeplink = web_url_path.replace("https://www.musora.com", "musora:/")
|
| 263 |
+
|
| 264 |
output_message = {
|
| 265 |
"header": message.get("header"),
|
| 266 |
"message": message.get("message"),
|
| 267 |
"content_id": None,
|
| 268 |
+
"web_url_path": web_url_path,
|
| 269 |
"title": user["recommendation"],
|
| 270 |
+
"thumbnail_url": None,
|
| 271 |
+
"deeplink": deeplink
|
| 272 |
}
|
| 273 |
else:
|
| 274 |
recommendation_dict = user["recommendation"]
|
| 275 |
content_id = int(recommendation_dict["content_id"])
|
| 276 |
|
| 277 |
+
# Extract required fields
|
| 278 |
web_url_path = recommendation_dict["web_url_path"]
|
| 279 |
title = recommendation_dict["title"]
|
| 280 |
thumbnail_url = recommendation_dict["thumbnail_url"]
|
| 281 |
|
| 282 |
+
# Clean up message text (although this doesn’t change `message` in place)
|
| 283 |
+
message["message"] = message["message"].replace('\\', '').replace('"', '')
|
| 284 |
+
|
| 285 |
+
deeplink = web_url_path.replace("https://www.musora.com", "musora:/")
|
| 286 |
|
| 287 |
# Add these to the message dict
|
| 288 |
output_message = {
|
|
|
|
| 291 |
"content_id": content_id,
|
| 292 |
"web_url_path": web_url_path,
|
| 293 |
"title": title,
|
| 294 |
+
"thumbnail_url": thumbnail_url,
|
| 295 |
+
"deeplink": deeplink
|
| 296 |
}
|
| 297 |
return output_message
|
| 298 |
|
|
|
|
| 304 |
:return: instructions as string
|
| 305 |
"""
|
| 306 |
|
| 307 |
+
banned_phrases = "\n".join(f"- {word}" for word in self.Core.config_file["AI_Jargon"])
|
| 308 |
+
jargon_list = "\n".join(f"- {word}" for word in self.Core.config_file[f"AI_phrases_{self.Core.brand}"])
|
| 309 |
|
| 310 |
+
if self.Core.personalization:
|
| 311 |
+
instructions = f"""
|
| 312 |
+
Your task is to select the best 'header' and a 'message' for a {self.Core.get_instrument()} student as a push notification.
|
| 313 |
+
Based on the user instructions, you might need to **modify the selected option** very minimal and slightly to improve personalization if capable while preserving the original brand voice, tone, rhythm, and structure.
|
| 314 |
+
**Important Note**: header < {self.Core.config_file["header_limit"]} and message < {self.Core.config_file["message_limit"]} characters.
|
| 315 |
+
**Important Note**: NEVER use time-related words (“new,” “recent,” “latest,” etc.) and NEVER imply recency in any way.
|
| 316 |
|
| 317 |
+
# Don't use below phrases, words, or similar variations of them:
|
| 318 |
+
{banned_phrases}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 319 |
{jargon_list}
|
|
|
|
| 320 |
"""
|
|
|
|
|
|
|
| 321 |
|
| 322 |
+
else:
|
| 323 |
+
instructions = f"""
|
| 324 |
+
Your task is to select the best 'header' and a 'message' for a {self.Core.get_instrument()} student as a push notification.
|
| 325 |
+
DO NOT **change** or **modify** or **add to** the selected option in any shape or form. **Use the exact original selected header and message without ANY change**
|
| 326 |
+
"""
|
| 327 |
|
| 328 |
+
return instructions
|
ai_messaging_system_v2/Messaging_system/Permes.py
ADDED
|
@@ -0,0 +1,412 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
the flow of the Program starts from create_personalized_message function
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import os
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from .DataCollector import DataCollector
|
| 11 |
+
from .CoreConfig import CoreConfig
|
| 12 |
+
from .LLMR import LLMR
|
| 13 |
+
from .Message_generator import MessageGenerator
|
| 14 |
+
from .PromptGenerator import PromptGenerator
|
| 15 |
+
try:
|
| 16 |
+
from ..database import DatabaseManager
|
| 17 |
+
except ImportError:
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
import sys
|
| 20 |
+
sys.path.append(str(Path(__file__).parent.parent))
|
| 21 |
+
from database import DatabaseManager
|
| 22 |
+
from .Homepage_Recommender import DefaultRec
|
| 23 |
+
from datetime import datetime, timezone
|
| 24 |
+
import logging
|
| 25 |
+
logger = logging.getLogger()
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class Permes:
|
| 29 |
+
"""
|
| 30 |
+
LLM-based personalized message generator:
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
# UI mode constants
|
| 34 |
+
UI_OUTPUT_DIR = Path(__file__).parent.parent / "Data" / "ui_output"
|
| 35 |
+
UI_OUTPUT_FILE = "messages.csv"
|
| 36 |
+
|
| 37 |
+
def create_personalize_messages(self, session, users, brand, config_file,
|
| 38 |
+
platform="push", stage=1, test_mode=False, mode="production"
|
| 39 |
+
, recsys_contents=None, model=None, identifier_column="email", segment_info=None,
|
| 40 |
+
sample_example=None, number_of_samples=None, involve_recsys_result=True,
|
| 41 |
+
personalization=True, campaign_name="no_recent_activity",
|
| 42 |
+
campaign_instructions=None, per_message_instructions=None,
|
| 43 |
+
specific_content_id=None, ui_experiment_id=None):
|
| 44 |
+
"""
|
| 45 |
+
:param campaign_name:
|
| 46 |
+
:param session: Snowflake session object
|
| 47 |
+
:param users: users dataframe
|
| 48 |
+
:param brand: brand name
|
| 49 |
+
:param config_file:
|
| 50 |
+
:param platform: push/app
|
| 51 |
+
:param stage: message number
|
| 52 |
+
:param test_mode: Boolean, if True uses test campaign name
|
| 53 |
+
:param mode: str, operating mode - "production", "test", or "ui"
|
| 54 |
+
:param recsys_contents: [course, quicktip, workout, song]
|
| 55 |
+
:param model: llm model name
|
| 56 |
+
:param identifier_column: email/user_id
|
| 57 |
+
:param segment_info: common info about users
|
| 58 |
+
:param sample_example: sample message
|
| 59 |
+
:param number_of_samples: number of messages to generate
|
| 60 |
+
:param involve_recsys_result: Boolean, recommend a content?
|
| 61 |
+
:param personalization: Boolean, personalized messages?
|
| 62 |
+
:param campaign_instructions: Optional campaign-wide instructions
|
| 63 |
+
:param per_message_instructions: Optional stage-specific instructions
|
| 64 |
+
:param specific_content_id: Optional content ID to force for all users
|
| 65 |
+
:param ui_experiment_id: Optional experiment ID for UI mode (e.g., 'messages_a_drumeo_20260111_1756')
|
| 66 |
+
:return:
|
| 67 |
+
"""
|
| 68 |
+
|
| 69 |
+
# primary processing
|
| 70 |
+
users = self.identify_users(users_df=users, identifier_column=identifier_column)
|
| 71 |
+
|
| 72 |
+
personalize_message = CoreConfig(session=session,
|
| 73 |
+
users_df=users,
|
| 74 |
+
brand=brand,
|
| 75 |
+
platform=platform,
|
| 76 |
+
config_file=config_file)
|
| 77 |
+
|
| 78 |
+
personalize_message.set_segment_name(campaign_name=campaign_name)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
if sample_example is not None: # Check if sample_example is not empty
|
| 82 |
+
personalize_message.set_sample_example(sample_example)
|
| 83 |
+
|
| 84 |
+
if number_of_samples is not None:
|
| 85 |
+
personalize_message.set_number_of_samples(number_of_samples)
|
| 86 |
+
|
| 87 |
+
if model is not None:
|
| 88 |
+
personalize_message.set_llm_model(model)
|
| 89 |
+
|
| 90 |
+
if segment_info is not None:
|
| 91 |
+
personalize_message.set_segment_info(segment_info)
|
| 92 |
+
|
| 93 |
+
if personalization:
|
| 94 |
+
personalize_message.set_personalization()
|
| 95 |
+
|
| 96 |
+
if involve_recsys_result:
|
| 97 |
+
personalize_message.set_messaging_mode("recsys_result")
|
| 98 |
+
personalize_message.set_involve_recsys_result(involve_recsys_result)
|
| 99 |
+
|
| 100 |
+
if recsys_contents is not None:
|
| 101 |
+
personalize_message.set_recsys_contents(recsys_contents)
|
| 102 |
+
|
| 103 |
+
# Set campaign and per-message instructions
|
| 104 |
+
if campaign_instructions is not None:
|
| 105 |
+
personalize_message.campaign_instructions = campaign_instructions
|
| 106 |
+
if per_message_instructions is not None:
|
| 107 |
+
personalize_message.per_message_instructions = per_message_instructions
|
| 108 |
+
|
| 109 |
+
# Set specific content ID for forcing specific content for all users
|
| 110 |
+
if specific_content_id is not None:
|
| 111 |
+
personalize_message.specific_content_id = specific_content_id
|
| 112 |
+
|
| 113 |
+
if stage == 1:
|
| 114 |
+
users_df = self._create_personalized_message(CoreConfig=personalize_message)
|
| 115 |
+
else:
|
| 116 |
+
users_df = self._create_followup_personalized_message(CoreConfig=personalize_message, stage=stage, test_mode=test_mode, mode=mode, ui_experiment_id=ui_experiment_id)
|
| 117 |
+
|
| 118 |
+
if users_df is None:
|
| 119 |
+
return users_df
|
| 120 |
+
|
| 121 |
+
total_prompt_tokens = personalize_message.total_tokens["prompt_tokens"]
|
| 122 |
+
total_completion_tokens = personalize_message.total_tokens["completion_tokens"]
|
| 123 |
+
total_cost = self.calculate_cost(total_prompt_tokens, total_completion_tokens, model)
|
| 124 |
+
|
| 125 |
+
logger.info(f"Estimated Cost (USD): {total_cost:.5f} ---> Number of messages: {(len(users_df))}")
|
| 126 |
+
print(f"Estimated Cost (USD): {total_cost:.5f} ---> Number of messages: {(len(users_df))}")
|
| 127 |
+
|
| 128 |
+
now_utc = datetime.now(timezone.utc)
|
| 129 |
+
|
| 130 |
+
# Create dataframe
|
| 131 |
+
message_cost = pd.DataFrame([{
|
| 132 |
+
"brand": brand,
|
| 133 |
+
"campaign_name": campaign_name,
|
| 134 |
+
"number_of_messages": len(users_df),
|
| 135 |
+
"model": model,
|
| 136 |
+
"stage": stage,
|
| 137 |
+
"total_prompt_tokens": total_prompt_tokens,
|
| 138 |
+
"total_completion_tokens": total_completion_tokens,
|
| 139 |
+
"total_cost": total_cost,
|
| 140 |
+
"timestamp": now_utc,
|
| 141 |
+
}])
|
| 142 |
+
|
| 143 |
+
snowflake_conn = DatabaseManager(session=session, brand=brand)
|
| 144 |
+
final_df = snowflake_conn.adjust_dataframe(users_df, stage)
|
| 145 |
+
|
| 146 |
+
# CRITICAL: Final deduplication by (user_id, stage) before storage
|
| 147 |
+
# This is a safety net to ensure no duplicates are ever written
|
| 148 |
+
initial_count = len(final_df)
|
| 149 |
+
user_id_col = 'user_id' if 'user_id' in final_df.columns else 'USER_ID'
|
| 150 |
+
|
| 151 |
+
if user_id_col in final_df.columns and 'stage' in final_df.columns:
|
| 152 |
+
# Keep the first occurrence of each (user_id, stage) pair
|
| 153 |
+
final_df = final_df.drop_duplicates(subset=[user_id_col, 'stage'], keep='first')
|
| 154 |
+
deduped_count = len(final_df)
|
| 155 |
+
|
| 156 |
+
if initial_count > deduped_count:
|
| 157 |
+
logger.warning(f"⚠️ Removed {initial_count - deduped_count} duplicate (user_id, stage) records before storage")
|
| 158 |
+
print(f"⚠️ Removed {initial_count - deduped_count} duplicate (user_id, stage) records before storage")
|
| 159 |
+
|
| 160 |
+
# UI mode: Store to local CSV instead of Snowflake
|
| 161 |
+
if mode == "ui":
|
| 162 |
+
self._store_to_csv_ui_mode(final_df, message_cost, ui_experiment_id)
|
| 163 |
+
snowflake_conn.close_connection()
|
| 164 |
+
else:
|
| 165 |
+
# Production/Test mode: Store to Snowflake
|
| 166 |
+
snowflake_conn.store_df_to_snowflake(table_name="initial_messages", dataframe=final_df,
|
| 167 |
+
database=None, schema=None, overwrite=False)
|
| 168 |
+
|
| 169 |
+
snowflake_conn.store_df_to_snowflake(table_name="message_cost", dataframe=message_cost,
|
| 170 |
+
database=None, schema=None, overwrite=False)
|
| 171 |
+
snowflake_conn.close_connection()
|
| 172 |
+
|
| 173 |
+
return users_df
|
| 174 |
+
|
| 175 |
+
#======================================================
|
| 176 |
+
def calculate_cost(self, total_prompt_tokens, total_completion_tokens, model):
|
| 177 |
+
input_price, output_price = self.get_model_price(model)
|
| 178 |
+
|
| 179 |
+
total_cost = ((total_prompt_tokens / 1000000) * input_price) + (
|
| 180 |
+
(total_completion_tokens / 1000000) * output_price) # Cost calculation estimation
|
| 181 |
+
|
| 182 |
+
return total_cost
|
| 183 |
+
|
| 184 |
+
# ====================================================
|
| 185 |
+
def get_model_price(self, model):
|
| 186 |
+
"""
|
| 187 |
+
getting the input price and output price per 1m token for the requested model
|
| 188 |
+
:param model:
|
| 189 |
+
:return:
|
| 190 |
+
"""
|
| 191 |
+
|
| 192 |
+
input_prices = {
|
| 193 |
+
"gpt-4o-mini":0.15,
|
| 194 |
+
"gpt-4.1-mini":0.4,
|
| 195 |
+
"gpt-5-mini": 0.25,
|
| 196 |
+
"gpt-5-nano": 0.05,
|
| 197 |
+
"gemini-2.5-flash":0.3,
|
| 198 |
+
"gemini-2.0-flash":0.1,
|
| 199 |
+
"gemini-2.5-flash-lite":0.1
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
out_prices = {
|
| 203 |
+
"gpt-4o-mini":0.6,
|
| 204 |
+
"gpt-4.1-mini":1.6,
|
| 205 |
+
"gpt-5-mini": 2,
|
| 206 |
+
"gpt-5-nano": 0.4,
|
| 207 |
+
"gemini-2.5-flash":2.5,
|
| 208 |
+
"gemini-2.0-flash":0.7,
|
| 209 |
+
"gemini-2.5-flash-lite":0.4
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
i_price = input_prices.get(model, 0)
|
| 213 |
+
o_price= out_prices.get(model, 0)
|
| 214 |
+
|
| 215 |
+
return i_price, o_price
|
| 216 |
+
|
| 217 |
+
# =====================================================
|
| 218 |
+
def identify_users(self, users_df, identifier_column):
|
| 219 |
+
"""
|
| 220 |
+
specifying the users for identification
|
| 221 |
+
:param identifier_column:
|
| 222 |
+
:return: updated users
|
| 223 |
+
"""
|
| 224 |
+
|
| 225 |
+
if identifier_column.upper() == "EMAIL":
|
| 226 |
+
return users_df
|
| 227 |
+
else:
|
| 228 |
+
users_df.rename(columns={identifier_column: "USER_ID"}, inplace=True)
|
| 229 |
+
return users_df
|
| 230 |
+
|
| 231 |
+
# ------------------------------------------------------------------
|
| 232 |
+
def _create_personalized_message(self, CoreConfig):
    """
    Orchestrate the full message-personalization pipeline.

    Flow: collect user data -> attach a recommendation (LLM-based when
    recsys results are enabled, otherwise the default "For You"/Homepage
    redirect) -> generate a personalized message per user -> drop rows
    without a usable message.

    :param CoreConfig: configuration/state object threaded through every
        pipeline stage (carries users_df, flags, model settings, ...)
    :return: users_df with generated messages, or None if there are no
        valid users to message
    """
    # Collecting all the data that we need to personalize messages
    datacollect = DataCollector(CoreConfig)
    CoreConfig = datacollect.gather_data()

    if len(CoreConfig.users_df) == 0:
        print("No valid user at the moment")
        return None

    # Generate recommendations for users if recommendations should be
    # included in the message.
    if CoreConfig.involve_recsys_result and CoreConfig.messaging_mode != "message":
        # Random recommender: pick a content randomly from each user's top
        # list. This simplifies the process and still yields a unique
        # recommendation for every single user.
        Recommender = LLMR(CoreConfig, random=True)
        CoreConfig = Recommender.get_recommendations()
    else:
        # We only want to generate the message and redirect them to the
        # For You section or Homepage.
        Recommender = DefaultRec(CoreConfig)
        CoreConfig = Recommender.get_recommendations()

    # Initialize message generator
    message_generator = MessageGenerator(CoreConfig)

    if message_generator.use_agentic_workflow:
        # Agentic workflow: GeneratorAgent handles prompt generation internally
        logger.info("Using agentic workflow for message generation")
    else:
        # Legacy workflow: generate prompts separately
        prompt = PromptGenerator(CoreConfig)
        CoreConfig = prompt.generate_prompts()

    # Generating messages for each user
    CoreConfig = message_generator.generate_messages()

    # Eliminate rows without a valid message (null, empty, or whitespace only).
    # BUGFIX: fillna("") is required -- NaN survives .str.strip().astype(bool)
    # as True, so null messages used to slip through this filter.
    valid = CoreConfig.users_df["message"].fillna("").str.strip().astype(bool)
    CoreConfig.users_df = CoreConfig.users_df[valid]
    # CoreConfig.checkpoint()

    return CoreConfig.users_df
|
| 279 |
+
|
| 280 |
+
def _create_followup_personalized_message(self, CoreConfig, stage, test_mode, mode="production", ui_experiment_id=None):
    """
    Generate follow-up messages for users who received earlier stages.

    Mirrors _create_personalized_message, but starts from the log of a
    previous run (fetch_log_data) instead of fresh data collection, and
    threads the stage number into prompt and message generation.

    :param CoreConfig: configuration/state object threaded through every stage
    :param stage: follow-up stage number (drives prompt/message generation)
    :param test_mode: whether to read test-run logs
    :param mode: operating mode - "production", "test", or "ui"
    :param ui_experiment_id: Optional experiment ID for UI mode
    :return: users_df with generated follow-up messages, or None when no
        users are eligible
    """
    # Read data from previous runs
    datacollect = DataCollector(CoreConfig)
    CoreConfig = datacollect.fetch_log_data(stage, test_mode, mode, ui_experiment_id)

    if CoreConfig is None or len(CoreConfig.users_df) == 0:
        print("No valid user at the moment")
        return None

    # Generate recommendations for users if recommendations should be
    # included in the message.
    if CoreConfig.involve_recsys_result and CoreConfig.messaging_mode != "message":
        # Random recommender: pick a content randomly from each user's top
        # list, which simplifies the process and keeps recommendations unique.
        Recommender = LLMR(CoreConfig, random=True)
        CoreConfig = Recommender.get_followup_recommendation()
    else:
        # We only want to generate the message and redirect them to the
        # For You section or Homepage.
        Recommender = DefaultRec(CoreConfig)
        CoreConfig = Recommender.get_recommendations()

    # Initialize message generator
    message_generator = MessageGenerator(CoreConfig)

    if message_generator.use_agentic_workflow:
        # Agentic workflow: GeneratorAgent handles prompt generation internally
        logger.info("Using agentic workflow for follow-up message generation")
    else:
        # Legacy workflow: generate prompts separately
        prompt = PromptGenerator(CoreConfig)
        CoreConfig = prompt.generate_prompts(stage=stage)

    # Generating messages for each user
    CoreConfig = message_generator.generate_messages(step=stage)

    # Eliminate rows without a valid message (null, empty, or whitespace only).
    # BUGFIX: fillna("") is required -- NaN survives .str.strip().astype(bool)
    # as True, so null messages used to slip through this filter.
    valid = CoreConfig.users_df["message"].fillna("").str.strip().astype(bool)
    CoreConfig.users_df = CoreConfig.users_df[valid]
    # CoreConfig.checkpoint()

    return CoreConfig.users_df
|
| 330 |
+
|
| 331 |
+
# ======================= UI MODE HELPER FUNCTIONS =======================
|
| 332 |
+
|
| 333 |
+
def _store_to_csv_ui_mode(self, messages_df: pd.DataFrame, cost_df: pd.DataFrame, ui_experiment_id: str = None):
    """
    Persist generated messages and cost data to local CSV files (UI mode).

    Data is appended to a single CSV that grows with each campaign stage,
    mirroring how Snowflake keeps all stages in one table. Files are
    written UTF-8 (with a BOM on creation) so emojis and special
    characters survive.

    Args:
        messages_df: DataFrame containing generated messages
        cost_df: DataFrame containing cost information
        ui_experiment_id: Optional experiment ID used to name the messages
            file (e.g. 'messages_a_drumeo_20260111_1756')
    """
    # Ensure output directory exists
    self.UI_OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

    # Messages go to an experiment-specific file when an ID is given
    # (AB testing); otherwise to the default UI output file.
    if ui_experiment_id:
        messages_file = self.UI_OUTPUT_DIR / f"{ui_experiment_id}.csv"
    else:
        messages_file = self.UI_OUTPUT_DIR / self.UI_OUTPUT_FILE
    cost_file = self.UI_OUTPUT_DIR / "message_cost.csv"

    def _append_or_create(df, path):
        # New files get a header and a BOM (utf-8-sig) for compatibility;
        # appends use plain utf-8 so we never write a second BOM.
        # Returns True when the data was appended to an existing file.
        if path.exists():
            df.to_csv(path, mode='a', header=False, index=False, encoding='utf-8')
            return True
        df.to_csv(path, mode='w', header=True, index=False, encoding='utf-8-sig')
        return False

    try:
        if _append_or_create(messages_df, messages_file):
            logger.info(f"Appended {len(messages_df)} messages to {messages_file}")
        else:
            logger.info(f"Created new messages file with {len(messages_df)} messages at {messages_file}")

        _append_or_create(cost_df, cost_file)

        print(f"✅ UI Mode: Stored {len(messages_df)} messages to {messages_file}")
        logger.info(f"UI Mode: Successfully stored messages and cost data locally")

    except Exception as e:
        logger.error(f"Error storing data in UI mode: {str(e)}")
        print(f"❌ Error storing data in UI mode: {str(e)}")
        raise
|
| 382 |
+
|
| 383 |
+
@classmethod
def get_ui_output_path(cls):
    """
    Get the path to the UI output directory.

    Exposed as a classmethod so callers (e.g. the UI layer) can locate
    the output CSVs without needing an instance.

    Returns:
        Path: Path to UI output directory (``cls.UI_OUTPUT_DIR``)
    """
    return cls.UI_OUTPUT_DIR
|
| 392 |
+
|
| 393 |
+
@classmethod
def clear_ui_output(cls):
    """
    Delete every CSV file in the UI output directory.

    Call this when starting a new UI run with fresh inputs so stale
    results from a previous campaign cannot leak into the new output.
    Individual deletion failures are logged and skipped, never raised.
    """
    if not cls.UI_OUTPUT_DIR.exists():
        print(f"ℹ️ UI output directory does not exist yet: {cls.UI_OUTPUT_DIR}")
        return

    for csv_path in cls.UI_OUTPUT_DIR.glob("*.csv"):
        try:
            csv_path.unlink()
            logger.info(f"Deleted {csv_path.name} from UI output directory")
        except Exception as e:
            logger.warning(f"Could not delete {csv_path.name}: {str(e)}")
    print(f"🧹 Cleared UI output directory: {cls.UI_OUTPUT_DIR}")
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
|
{Messaging_system → ai_messaging_system_v2/Messaging_system}/PromptGenerator.py
RENAMED
|
@@ -3,7 +3,6 @@ THis class generate proper prompts for the messaging system
|
|
| 3 |
"""
|
| 4 |
import pandas as pd
|
| 5 |
from tqdm import tqdm
|
| 6 |
-
from Messaging_system.PromptEng import PromptEngine
|
| 7 |
|
| 8 |
|
| 9 |
class PromptGenerator:
|
|
@@ -13,7 +12,7 @@ class PromptGenerator:
|
|
| 13 |
|
| 14 |
# --------------------------------------------------------------
|
| 15 |
# --------------------------------------------------------------
|
| 16 |
-
def generate_prompts(self):
|
| 17 |
"""
|
| 18 |
generates a personalized message for each student
|
| 19 |
:return:
|
|
@@ -25,9 +24,11 @@ class PromptGenerator:
|
|
| 25 |
# if we have personalized information about them, we generate a personalized prompt
|
| 26 |
for idx, row in tqdm(self.Core.users_df.iterrows(), desc="generating prompts"):
|
| 27 |
# check if we have enough information to generate a personalized message
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
|
|
|
|
|
|
| 31 |
self.Core.users_df.at[idx, "prompt"] = prompt
|
| 32 |
self.Core.users_df.at[idx, "source"] = "AI-generated"
|
| 33 |
|
|
@@ -40,21 +41,27 @@ class PromptGenerator:
|
|
| 40 |
|
| 41 |
# ==============================================================
|
| 42 |
def get_user_profile(self, user):
|
| 43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
if self.Core.personalization:
|
| 45 |
user_info = f"""
|
| 46 |
|
| 47 |
-
### **Use below information from the user to make the final output more personalized if applicable.
|
| 48 |
-
Use these
|
| 49 |
|
| 50 |
- The user is a {str(self.Core.get_instrument())} student.
|
| 51 |
- {self.safe_get(self.Core.segment_info)}
|
| 52 |
-
-
|
| 53 |
-
- User profile --> use **indirectly** if it can improve personalization and **do not** use their preferences keywords ("genre, styles") directly in the message:
|
| 54 |
{self.safe_get(user.get("user_info"))}
|
| 55 |
"""
|
| 56 |
-
#
|
| 57 |
-
|
|
|
|
|
|
|
|
|
|
| 58 |
|
| 59 |
else:
|
| 60 |
|
|
@@ -79,17 +86,56 @@ Here is the information about the user:
|
|
| 79 |
#
|
| 80 |
# {self.safe_get(user.get("user_info"))}
|
| 81 |
|
|
|
|
| 82 |
|
| 83 |
# --------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
def generate_personalized_prompt(self, user):
|
| 85 |
"""
|
| 86 |
generate a personalized prompt by putting the information from the user into a template prompt
|
| 87 |
:return: Personalized prompt (string)
|
| 88 |
"""
|
| 89 |
input_context = self.input_context()
|
| 90 |
-
instructions = self.message_type_instructions()
|
| 91 |
user_info = self.get_user_profile(user=user)
|
| 92 |
|
|
|
|
|
|
|
|
|
|
| 93 |
recommendation_instructions = self.recommendations_instructions(user)
|
| 94 |
|
| 95 |
example_output = self.example_output()
|
|
@@ -99,13 +145,13 @@ Here is the information about the user:
|
|
| 99 |
prompt = f"""
|
| 100 |
{input_context}
|
| 101 |
|
| 102 |
-
{
|
| 103 |
|
| 104 |
{user_info}
|
| 105 |
|
| 106 |
-
{
|
| 107 |
|
| 108 |
-
{
|
| 109 |
|
| 110 |
{output_instructions}
|
| 111 |
"""
|
|
@@ -121,16 +167,9 @@ Here is the information about the user:
|
|
| 121 |
|
| 122 |
if self.Core.personalization:
|
| 123 |
context = f"""
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
**
|
| 127 |
-
• Produce two fields: "header" and "message".
|
| 128 |
-
• Keep the header < {self.Core.config_file["header_limit"]} chars and the message < {self.Core.config_file["message_limit"]}chars.
|
| 129 |
-
|
| 130 |
-
**Voice & Style**
|
| 131 |
-
• Sounds like a friend texting a tip.
|
| 132 |
-
• No hype, no sales language, no “AI‑speak.”
|
| 133 |
-
• If you add an emoji, use {self.Core.get_emoji()} exactly once, at the *end* of either the header *or* message (never both).
|
| 134 |
"""
|
| 135 |
|
| 136 |
else:
|
|
@@ -151,6 +190,7 @@ DO NOT **change** or **modify** or **add to** the selected option in any shape o
|
|
| 151 |
:return:
|
| 152 |
"""
|
| 153 |
|
|
|
|
| 154 |
instructions_for_recsys = f"""
|
| 155 |
### ** Recommendation Personalization Guidelines **
|
| 156 |
|
|
@@ -159,27 +199,31 @@ Below is the content we want to recommend to the user:
|
|
| 159 |
→ Recommended Content Details:
|
| 160 |
{user["recommendation_info"]}
|
| 161 |
|
| 162 |
-
When incorporating this content into the message, follow these guidelines to keep the message friendly, relevant, and casual (not too scripted):
|
| 163 |
|
| 164 |
1. **Title Usage**:
|
| 165 |
-
- Refer to the **CONTENT_TITLE** naturally in the message — paraphrase or describe it, but do *not* quote it or use it verbatim.
|
| 166 |
- Avoid making it feel like a promotion; frame it as something that *might interest* or *help* the user.
|
| 167 |
|
| 168 |
2. **Content Type Context**:
|
| 169 |
-
- Mention the **CONTENT_TYPE** (e.g., course, workout
|
| 170 |
|
| 171 |
|
| 172 |
3. **Artist/Instructor Name**:
|
| 173 |
- If the full name of the **ARTIST** is available, mention it casually if appropriate (e.g., "led by Jordan Mitchell").
|
| 174 |
- If only the first name is known, do *not* include it in the message at all.
|
|
|
|
| 175 |
|
| 176 |
4. **Tone & Style**:
|
| 177 |
- Keep the tone light, supportive, and personal — like a helpful suggestion from a friend.
|
| 178 |
-
- Avoid sounding pushy
|
| 179 |
|
| 180 |
5. **Flexibility**:
|
| 181 |
- You don’t need to include all elements every time. Prioritize what feels most relevant and natural based on the context.
|
| 182 |
|
|
|
|
|
|
|
|
|
|
| 183 |
Goal: Make the recommendation feel personalized and casually relevant — not generic or copy-pasted.
|
| 184 |
"""
|
| 185 |
|
|
@@ -195,22 +239,22 @@ Goal: Make the recommendation feel personalized and casually relevant — not ge
|
|
| 195 |
:return: output instructions as a string
|
| 196 |
"""
|
| 197 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 198 |
instructions = f"""
|
| 199 |
-
|
| 200 |
### **Output instructions**:
|
|
|
|
| 201 |
|
| 202 |
**Expected output structure:**
|
| 203 |
|
| 204 |
-
thoughts:
|
| 205 |
-
1. Generate a header less than {self.Core.config_file["header_limit"]} characters.
|
| 206 |
-
2. Generate a message less than {self.Core.config_file["message_limit"]} characters.
|
| 207 |
-
3. Ensure that the output is a valid JSON following below structure.
|
| 208 |
-
|
| 209 |
{{
|
| 210 |
-
"header": "final header",
|
| 211 |
-
"message": "final message",
|
| 212 |
}}
|
| 213 |
|
|
|
|
| 214 |
"""
|
| 215 |
|
| 216 |
return instructions
|
|
@@ -230,66 +274,52 @@ thoughts:
|
|
| 230 |
else:
|
| 231 |
# one shot prompting
|
| 232 |
example = f"""
|
| 233 |
-
Below are
|
| 234 |
|
| 235 |
-
### **
|
| 236 |
{self.Core.sample_example}
|
| 237 |
"""
|
| 238 |
|
| 239 |
return example
|
| 240 |
|
| 241 |
-
|
|
|
|
| 242 |
"""
|
| 243 |
-
|
| 244 |
-
|
|
|
|
|
|
|
| 245 |
"""
|
|
|
|
|
|
|
|
|
|
| 246 |
|
| 247 |
-
instructions
|
| 248 |
-
|
| 249 |
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
- No exclamation marks in the header; one is OK in the message if it feels natural.
|
| 258 |
|
| 259 |
-
|
| 260 |
-
|
| 261 |
|
|
|
|
|
|
|
| 262 |
|
| 263 |
-
|
| 264 |
-
instructions = f"""
|
| 265 |
-
Message Specifications:
|
| 266 |
-
- The message is an **in app notification**.
|
| 267 |
-
- ** Keep the First sentence as "header" that should be a short personalized eye catching sentence less than 40 character **.
|
| 268 |
-
- ** For the "header", don't use exclamation mark at the end, instead, use a space following with a proper emoji at the end of the "header" (e.g. Great work John 😍) **
|
| 269 |
-
- **Keep the message concise and straightforward**.
|
| 270 |
-
- **Start directly with the message content**; do not include greetings (e.g., "Hello") or closing phrases.
|
| 271 |
-
- Make the message highly **personalized** and **eye-catching**.
|
| 272 |
-
- "Personalized" means the user should feel the message is specifically crafted for them and not generic.
|
| 273 |
-
- **Every word should contribute to maximizing impact and engagement**.
|
| 274 |
-
- {message_style}
|
| 275 |
-
"""
|
| 276 |
|
| 277 |
-
|
| 278 |
|
| 279 |
-
|
| 280 |
-
def message_instructions(self):
|
| 281 |
-
"""
|
| 282 |
-
defines the style of the message: e.g. friendly, kind, tone, etc.
|
| 283 |
-
:return: style_instructions(str)
|
| 284 |
-
"""
|
| 285 |
|
| 286 |
-
|
| 287 |
-
message_style = ""
|
| 288 |
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
** Important instructions**
|
| 292 |
-
- {self.Core.message_style}.
|
| 293 |
-
"""
|
| 294 |
|
| 295 |
-
return
|
|
|
|
| 3 |
"""
|
| 4 |
import pandas as pd
|
| 5 |
from tqdm import tqdm
|
|
|
|
| 6 |
|
| 7 |
|
| 8 |
class PromptGenerator:
|
|
|
|
| 12 |
|
| 13 |
# --------------------------------------------------------------
|
| 14 |
# --------------------------------------------------------------
|
| 15 |
+
def generate_prompts(self, stage=1):
|
| 16 |
"""
|
| 17 |
generates a personalized message for each student
|
| 18 |
:return:
|
|
|
|
| 24 |
# if we have personalized information about them, we generate a personalized prompt
|
| 25 |
for idx, row in tqdm(self.Core.users_df.iterrows(), desc="generating prompts"):
|
| 26 |
# check if we have enough information to generate a personalized message
|
| 27 |
+
if stage == 1:
|
| 28 |
+
prompt = self.generate_personalized_prompt(user=row)
|
| 29 |
+
else:
|
| 30 |
+
prompt = self.generate_follow_up_prompt(user=row)
|
| 31 |
+
|
| 32 |
self.Core.users_df.at[idx, "prompt"] = prompt
|
| 33 |
self.Core.users_df.at[idx, "source"] = "AI-generated"
|
| 34 |
|
|
|
|
| 41 |
|
| 42 |
# ==============================================================
|
| 43 |
def get_user_profile(self, user):
|
| 44 |
+
"""
|
| 45 |
+
getting personalized information about users (e.g. preferences)
|
| 46 |
+
:param user:
|
| 47 |
+
:return:
|
| 48 |
+
"""
|
| 49 |
if self.Core.personalization:
|
| 50 |
user_info = f"""
|
| 51 |
|
| 52 |
+
### **Use below information from the user to **modify** the selected header and message (without any change in style, tune and content) and make the final output more personalized if applicable.
|
| 53 |
+
Use these only to *flavour* the existing header and message. Never add new sentences just to stuff profile data.
|
| 54 |
|
| 55 |
- The user is a {str(self.Core.get_instrument())} student.
|
| 56 |
- {self.safe_get(self.Core.segment_info)}
|
| 57 |
+
- User profile --> Only use **indirectly** if it can improve personalization (Don't use their preferred genre or styles directly in the message:
|
|
|
|
| 58 |
{self.safe_get(user.get("user_info"))}
|
| 59 |
"""
|
| 60 |
+
# birth_day_instructions = self.birth_day_instructions(user)
|
| 61 |
+
# if birth_day_instructions is not None:
|
| 62 |
+
# user_info += "\n" + birth_day_instructions
|
| 63 |
+
## eliminate:
|
| 64 |
+
# - first name: {self.safe_get(user.get("first_name"))} --> Only use if is available and the first name is a **valid name**
|
| 65 |
|
| 66 |
else:
|
| 67 |
|
|
|
|
| 86 |
#
|
| 87 |
# {self.safe_get(user.get("user_info"))}
|
| 88 |
|
| 89 |
+
return user_info
|
| 90 |
|
| 91 |
# --------------------------------------------------------------
|
| 92 |
+
def get_additional_instructions(self):
    """
    Build the optional extra-instructions section of the prompt.

    Combines campaign-wide instructions and per-message (stage-specific)
    instructions, each under its own markdown heading, joined by a blank
    line. Injected after the user-profile section of the prompt.

    :return: formatted instructions string, or "" when neither source has
        any instructions
    """
    sections = []

    # Campaign-wide instructions, when configured
    campaign = self.Core.campaign_instructions
    if campaign:
        sections.append(f"### **Campaign Instructions**\n{campaign}")

    # Per-message (stage-specific) instructions, when configured
    per_message = self.Core.per_message_instructions
    if per_message:
        sections.append(f"### **Additional Instructions for This Message**\n{per_message}")

    return "\n\n".join(sections) if sections else ""
|
| 116 |
+
|
| 117 |
+
# --------------------------------------------------------------
|
| 118 |
+
def birth_day_instructions(self, user):
    """
    Build a birthday-reminder instruction for the prompt, if applicable.

    :param user: row/dict exposing a "birthday_reminder" entry that holds
        the number of days until the user's birthday (may be null/empty)
    :return: instruction string when a reminder value is present,
        otherwise None
    """
    reminder = user["birthday_reminder"]

    # Skip users without a usable reminder value (NaN/None/empty container).
    if not pd.notna(reminder) or reminder in [None, [], {}]:
        return None

    return f"""
- **Include a short message to remind them that their birthday is coming up.**: {str(reminder)} Days until their birthday.
"""
|
| 127 |
+
|
| 128 |
def generate_personalized_prompt(self, user):
|
| 129 |
"""
|
| 130 |
generate a personalized prompt by putting the information from the user into a template prompt
|
| 131 |
:return: Personalized prompt (string)
|
| 132 |
"""
|
| 133 |
input_context = self.input_context()
|
|
|
|
| 134 |
user_info = self.get_user_profile(user=user)
|
| 135 |
|
| 136 |
+
# NEW: Get additional instructions (campaign-wide + per-message)
|
| 137 |
+
additional_instructions = self.get_additional_instructions()
|
| 138 |
+
|
| 139 |
recommendation_instructions = self.recommendations_instructions(user)
|
| 140 |
|
| 141 |
example_output = self.example_output()
|
|
|
|
| 145 |
prompt = f"""
|
| 146 |
{input_context}
|
| 147 |
|
| 148 |
+
{example_output}
|
| 149 |
|
| 150 |
{user_info}
|
| 151 |
|
| 152 |
+
{additional_instructions}
|
| 153 |
|
| 154 |
+
{recommendation_instructions}
|
| 155 |
|
| 156 |
{output_instructions}
|
| 157 |
"""
|
|
|
|
| 167 |
|
| 168 |
if self.Core.personalization:
|
| 169 |
context = f"""
|
| 170 |
+
Your task is to select the best 'header' and a 'message' for a {self.Core.get_instrument()} student as a push notification.
|
| 171 |
+
Based on the user instructions, you might need to **modify the selected option** very minimal and slightly to improve personalization if capable.
|
| 172 |
+
**Important Note**: header < {self.Core.config_file["header_limit"]} and message < {self.Core.config_file["message_limit"]} characters.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 173 |
"""
|
| 174 |
|
| 175 |
else:
|
|
|
|
| 190 |
:return:
|
| 191 |
"""
|
| 192 |
|
| 193 |
+
|
| 194 |
instructions_for_recsys = f"""
|
| 195 |
### ** Recommendation Personalization Guidelines **
|
| 196 |
|
|
|
|
| 199 |
→ Recommended Content Details:
|
| 200 |
{user["recommendation_info"]}
|
| 201 |
|
| 202 |
+
When incorporating this content into the message and header, follow these guidelines to keep the header and message friendly, relevant, and casual (not too scripted):
|
| 203 |
|
| 204 |
1. **Title Usage**:
|
| 205 |
+
- Refer to the **CONTENT_TITLE** or content details naturally in the message — paraphrase or describe it, but do *not* quote it or use it verbatim.
|
| 206 |
- Avoid making it feel like a promotion; frame it as something that *might interest* or *help* the user.
|
| 207 |
|
| 208 |
2. **Content Type Context**:
|
| 209 |
+
- Mention the **CONTENT_TYPE** (e.g., course, workout) only if it flows naturally in the message.
|
| 210 |
|
| 211 |
|
| 212 |
3. **Artist/Instructor Name**:
|
| 213 |
- If the full name of the **ARTIST** is available, mention it casually if appropriate (e.g., "led by Jordan Mitchell").
|
| 214 |
- If only the first name is known, do *not* include it in the message at all.
|
| 215 |
+
- **DO NOT ASSUME or HALLUCINATE Artist name based on previous messages**, only refer to Recommended Content Details provided.
|
| 216 |
|
| 217 |
4. **Tone & Style**:
|
| 218 |
- Keep the tone light, supportive, and personal — like a helpful suggestion from a friend.
|
| 219 |
+
- Avoid sounding pushy, overly promotional, or marketing pitch.
|
| 220 |
|
| 221 |
5. **Flexibility**:
|
| 222 |
- You don’t need to include all elements every time. Prioritize what feels most relevant and natural based on the context.
|
| 223 |
|
| 224 |
+
6. **Time Reference**:
|
| 225 |
+
- NEVER use time-related words (“new,” “recent,” “latest,” etc.) and Never imply recency of the content in any way.
|
| 226 |
+
|
| 227 |
Goal: Make the recommendation feel personalized and casually relevant — not generic or copy-pasted.
|
| 228 |
"""
|
| 229 |
|
|
|
|
| 239 |
:return: output instructions as a string
|
| 240 |
"""
|
| 241 |
|
| 242 |
+
general_instructions = f"""
|
| 243 |
+
- Ensure that the output is a valid JSON following above structure.
|
| 244 |
+
"""
|
| 245 |
+
|
| 246 |
instructions = f"""
|
|
|
|
| 247 |
### **Output instructions**:
|
| 248 |
+
- header < {self.Core.config_file["header_limit"]} and message < {self.Core.config_file["message_limit"]} characters.
|
| 249 |
|
| 250 |
**Expected output structure:**
|
| 251 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 252 |
{{
|
| 253 |
+
"header": "final header considering instructions",
|
| 254 |
+
"message": "final message considering instructions",
|
| 255 |
}}
|
| 256 |
|
| 257 |
+
{general_instructions}
|
| 258 |
"""
|
| 259 |
|
| 260 |
return instructions
|
|
|
|
| 274 |
else:
|
| 275 |
# one shot prompting
|
| 276 |
example = f"""
|
| 277 |
+
Below are the available options to select the header and message for the push notification:
|
| 278 |
|
| 279 |
+
### **Available options:**
|
| 280 |
{self.Core.sample_example}
|
| 281 |
"""
|
| 282 |
|
| 283 |
return example
|
| 284 |
|
| 285 |
+
# =============================================================
|
| 286 |
+
def generate_follow_up_prompt(self, user):
|
| 287 |
"""
|
| 288 |
+
Creates a prompt to feed to the LLM, incorporating 3 previously generated messages.
|
| 289 |
+
|
| 290 |
+
:param previous_messages: A list of dicts, each containing 'header' and 'message'.
|
| 291 |
+
:return: A user-facing prompt string instructing the model to produce a new message.
|
| 292 |
"""
|
| 293 |
+
previous_text_str = str(user["previous_messages"])
|
| 294 |
+
user_info = self.get_user_profile(user=user)
|
| 295 |
+
input_context = self.input_context()
|
| 296 |
|
| 297 |
+
# NEW: Get additional instructions (campaign-wide + per-message)
|
| 298 |
+
additional_instructions = self.get_additional_instructions()
|
| 299 |
|
| 300 |
+
recommendation_instructions = self.recommendations_instructions(user)
|
| 301 |
+
output_instructions = self.output_instruction()
|
| 302 |
+
examples = self.example_output()
|
| 303 |
|
| 304 |
+
# Craft the prompt
|
| 305 |
+
prompt = f"""
|
| 306 |
+
We have previously sent these push notifications to the user and The user has not re-engaged yet:
|
|
|
|
| 307 |
|
| 308 |
+
** Previous messages **
|
| 309 |
+
{previous_text_str}
|
| 310 |
|
| 311 |
+
{input_context}
|
| 312 |
+
- The new selection should be different from previous headers and messages and we should not have similar words and phrases from previous sends.
|
| 313 |
|
| 314 |
+
{examples}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 315 |
|
| 316 |
+
{user_info}
|
| 317 |
|
| 318 |
+
{additional_instructions}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 319 |
|
| 320 |
+
{recommendation_instructions}
|
|
|
|
| 321 |
|
| 322 |
+
{output_instructions}
|
| 323 |
+
"""
|
|
|
|
|
|
|
|
|
|
| 324 |
|
| 325 |
+
return prompt
|
ai_messaging_system_v2/Messaging_system/agents/README.md
ADDED
|
@@ -0,0 +1,518 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Agentic Workflow System
|
| 2 |
+
|
| 3 |
+
## Overview
|
| 4 |
+
|
| 5 |
+
The Agentic Workflow System is a multi-agent architecture designed to enhance the quality and accuracy of personalized push notification messages. It replaces the single-step LLM generation with a sophisticated two-agent system that includes generation, validation, and iterative refinement.
|
| 6 |
+
|
| 7 |
+
## Architecture
|
| 8 |
+
|
| 9 |
+
### System Components
|
| 10 |
+
|
| 11 |
+
```
|
| 12 |
+
┌─────────────────────────────────────────────────────────────┐
|
| 13 |
+
│ AgentOrchestrator │
|
| 14 |
+
│ (Manages workflow, feedback loops, rejection logging) │
|
| 15 |
+
└───────┬─────────────────────────────────────────┬───────────┘
|
| 16 |
+
│ │
|
| 17 |
+
▼ ▼
|
| 18 |
+
┌──────────────────┐ ┌──────────────────┐
|
| 19 |
+
│ GeneratorAgent │◄───── Feedback ────│ SecurityAgent │
|
| 20 |
+
│ │ │ │
|
| 21 |
+
│ - Prompt Gen │ │ - Rule-based │
|
| 22 |
+
│ - LLM Call │ │ - LLM-based │
|
| 23 |
+
│ - Instructions │ │ - Validation │
|
| 24 |
+
└──────────────────┘ └──────────────────┘
|
| 25 |
+
│ │
|
| 26 |
+
└─────────► Message ─────────► │
|
| 27 |
+
│
|
| 28 |
+
┌─────────▼─────────┐
|
| 29 |
+
│ RejectionLogger │
|
| 30 |
+
│ (CSV Logs) │
|
| 31 |
+
└───────────────────┘
|
| 32 |
+
```
|
| 33 |
+
|
| 34 |
+
### Key Features
|
| 35 |
+
|
| 36 |
+
1. **Multi-Agent System**: Separates generation from validation for improved quality control
|
| 37 |
+
2. **Feedback Loop**: Up to 3 attempts with detailed feedback for regeneration
|
| 38 |
+
3. **Rule-Based + LLM Validation**: Fast rule-based checks followed by focused LLM validation (instruction adherence, content accuracy, authenticity)
|
| 39 |
+
4. **Rejection Logging**: Tracks all rejections with detailed information for evaluation
|
| 40 |
+
5. **Scalable Architecture**: Easy to add new agents and validation rules
|
| 41 |
+
6. **Enhanced Prompt Engineering**: Different strategies for instructions vs. examples
|
| 42 |
+
7. **Conditional Prompts**: Support for dynamic prompt injection (e.g., birthday reminders)
|
| 43 |
+
8. **Smart LLM Integration**: Multi-mode validation system with automatic retries and fallback strategies
|
| 44 |
+
|
| 45 |
+
### LLM Integration & Validation Modes
|
| 46 |
+
|
| 47 |
+
The agentic system integrates seamlessly with the existing `LLM.py` infrastructure through a sophisticated validation mode system:
|
| 48 |
+
|
| 49 |
+
**Three Validation Modes**:
|
| 50 |
+
1. **`message_generation`** (for GeneratorAgent):
|
| 51 |
+
- Validates presence of `header` and `message` keys
|
| 52 |
+
- Enforces character limits (header < 30, message < 110)
|
| 53 |
+
- Automatically retries up to 6 times (Google) or 5 times (OpenAI)
|
| 54 |
+
- Provides detailed error messages for each retry
|
| 55 |
+
|
| 56 |
+
2. **`validation_response`** (for SecurityAgent):
|
| 57 |
+
- Validates presence of `approved` key
|
| 58 |
+
- Expects response format: `{"approved": true/false, "issues": [...], "feedback": "..."}`
|
| 59 |
+
- Used for LLM-based quality validation
|
| 60 |
+
|
| 61 |
+
3. **`generic_json`** (for future agents):
|
| 62 |
+
- Only validates JSON syntax
|
| 63 |
+
- No specific key requirements
|
| 64 |
+
- Flexible for custom agent responses
|
| 65 |
+
|
| 66 |
+
**Benefits**:
|
| 67 |
+
- Rule-based validation happens **before** LLM calls, catching most issues early
|
| 68 |
+
- Each agent uses the correct validation for its response format
|
| 69 |
+
- No false rejections due to mismatched validation expectations
|
| 70 |
+
- Automatic retry logic with detailed feedback reduces waste
|
| 71 |
+
|
| 72 |
+
## Agents
|
| 73 |
+
|
| 74 |
+
### 1. BaseAgent (Abstract Class)
|
| 75 |
+
|
| 76 |
+
The foundation for all agents, providing common interface and utilities.
|
| 77 |
+
|
| 78 |
+
**Location**: `base_agent.py`
|
| 79 |
+
|
| 80 |
+
**Key Methods**:
|
| 81 |
+
- `execute(context)`: Abstract method that all agents must implement
|
| 82 |
+
- `log_info/warning/error()`: Logging utilities
|
| 83 |
+
- `validate_context()`: Context validation helper
|
| 84 |
+
|
| 85 |
+
**Purpose**: Ensures consistency across agents and makes it easy to add new agents.
|
| 86 |
+
|
| 87 |
+
### 2. GeneratorAgent
|
| 88 |
+
|
| 89 |
+
Generates personalized messages using cutting-edge prompt engineering.
|
| 90 |
+
|
| 91 |
+
**Location**: `generator_agent.py`
|
| 92 |
+
|
| 93 |
+
**Responsibilities**:
|
| 94 |
+
- Generate prompts with adaptive strategies based on instructions/examples
|
| 95 |
+
- Call LLM to create headers and messages
|
| 96 |
+
- Handle feedback from SecurityAgent for regeneration
|
| 97 |
+
- Support conditional prompt injection (birthday reminders, etc.)
|
| 98 |
+
|
| 99 |
+
**Prompt Engineering Strategies**:
|
| 100 |
+
|
| 101 |
+
| Scenario | Strategy |
|
| 102 |
+
|----------|----------|
|
| 103 |
+
| Instructions + Examples | Prioritize instructions, use examples for style/voice |
|
| 104 |
+
| Only Examples | Analyze examples to understand brand vocabulary, style, voice; create personalized message matching that style |
|
| 105 |
+
| Only Instructions | Follow instructions without example reference |
|
| 106 |
+
| Neither | Basic prompt with user data |
|
| 107 |
+
|
| 108 |
+
**Key Features**:
|
| 109 |
+
- Handles both initial messages (stage 1) and follow-up messages (stages 2-11)
|
| 110 |
+
- Checks previous messages to avoid repetition
|
| 111 |
+
- Integrates user profile, recommendations, and conditional prompts
|
| 112 |
+
- Enforces character limits and banned phrase avoidance
|
| 113 |
+
|
| 114 |
+
### 3. SecurityAgent
|
| 115 |
+
|
| 116 |
+
Validates generated messages as a quality firewall.
|
| 117 |
+
|
| 118 |
+
**Location**: `security_agent.py`
|
| 119 |
+
|
| 120 |
+
**Responsibilities**:
|
| 121 |
+
- Perform fast rule-based validation
|
| 122 |
+
- Conduct focused LLM-based validation (instruction adherence, content accuracy, authenticity)
|
| 123 |
+
- Provide detailed feedback for regeneration
|
| 124 |
+
- Approve or reject messages
|
| 125 |
+
|
| 126 |
+
**Validation Pipeline**:
|
| 127 |
+
|
| 128 |
+
```
|
| 129 |
+
Input Message
|
| 130 |
+
│
|
| 131 |
+
▼
|
| 132 |
+
┌─────────────────────┐
|
| 133 |
+
│ Rule-Based Checks │ (Fast - <1ms)
|
| 134 |
+
│ - Character limits │
|
| 135 |
+
│ - Empty content │
|
| 136 |
+
│ - Banned phrases │
|
| 137 |
+
│ - Placeholders │
|
| 138 |
+
│ - Singeo phrases │
|
| 139 |
+
└──────┬──────────────┘
|
| 140 |
+
│
|
| 141 |
+
▼ (If passes)
|
| 142 |
+
┌─────────────────────┐
|
| 143 |
+
│ LLM-Based Checks │ (Focused & Concise)
|
| 144 |
+
│ - Instruction │
|
| 145 |
+
│ adherence │
|
| 146 |
+
│ - Content accuracy │
|
| 147 |
+
│ - Authenticity │
|
| 148 |
+
│ - Time words │
|
| 149 |
+
│ - Similarity │
|
| 150 |
+
└──────┬──────────────┘
|
| 151 |
+
│
|
| 152 |
+
▼
|
| 153 |
+
Approved ✓
|
| 154 |
+
```
|
| 155 |
+
|
| 156 |
+
**Validation Criteria**:
|
| 157 |
+
|
| 158 |
+
1. **Rule-Based** (Fast - catches obvious issues before LLM):
|
| 159 |
+
- **Character limits**: header < 30, message < 110
|
| 160 |
+
- **Non-empty content**: Both header and message must have text
|
| 161 |
+
- **Banned phrases**: No AI jargon or brand-specific banned phrases
|
| 162 |
+
- **Placeholder detection**: No template variables like [user_name], {name}, {{var}}
|
| 163 |
+
- **Singeo-specific**: No "your instrument" phrases (vocals aren't instruments)
|
| 164 |
+
|
| 165 |
+
2. **LLM-Based** (Context-aware validation):
|
| 166 |
+
- **Instruction adherence**: Follows provided campaign/message instructions
|
| 167 |
+
- **Content accuracy**: Correct artist/content names, no hallucinations
|
| 168 |
+
- **Authenticity**: Sounds human, not robotic
|
| 169 |
+
- **Time words**: Distinguishes between content recency (REJECT: "new course") and action timing (OK: "practice today")
|
| 170 |
+
- **Similarity check**: For follow-ups, ensures message doesn't sound too similar to last 2 messages (focuses on structure/tone, not just words)
|
| 171 |
+
- Note: Be lenient - only reject OBVIOUS problems
|
| 172 |
+
|
| 173 |
+
**Recent Improvements (Jan 2026)**:
|
| 174 |
+
|
| 175 |
+
1. **Placeholder Detection**:
|
| 176 |
+
- Catches template variables like [user_name], {name}, {{var}} before they reach users
|
| 177 |
+
- Prevents hallucination where LLM generates placeholder strings instead of actual content
|
| 178 |
+
|
| 179 |
+
2. **Smart Time-Word Validation**:
|
| 180 |
+
- Moved from rigid rule-based to context-aware LLM validation
|
| 181 |
+
- Distinguishes "practice today" (action timing ✓) from "new course" (content recency ✗)
|
| 182 |
+
- Prevents false claims that recommended content is new
|
| 183 |
+
|
| 184 |
+
3. **Singeo Brand Protection**:
|
| 185 |
+
- Special check for "your instrument" phrases when brand is Singeo
|
| 186 |
+
- Prevents awkward phrasing since vocals/singing are not instruments
|
| 187 |
+
- Uses natural language like "practice" or "continue learning"
|
| 188 |
+
|
| 189 |
+
4. **Message Similarity Detection**:
|
| 190 |
+
- Compares against last 2 previous messages for follow-ups
|
| 191 |
+
- Focuses on overall impression/structure, not just word overlap
|
| 192 |
+
- Example: "Ready to sing?" vs "Ready to practice?" = too similar (same feel)
|
| 193 |
+
- Ensures fresh, varied messaging across campaign stages
|
| 194 |
+
|
| 195 |
+
5. **Simplified Validation Prompts**:
|
| 196 |
+
- Reduced from ~80 lines to ~25 lines
|
| 197 |
+
- More concise, direct, and effective
|
| 198 |
+
- Faster processing with better clarity
|
| 199 |
+
- All validation criteria maintained
|
| 200 |
+
|
| 201 |
+
6. **Brand-Specific Labeling Prevention**:
|
| 202 |
+
- Banned labels across all brands: "drummer", "guitarist", "pianist", "singer"
|
| 203 |
+
- Uses "learning {instrument}" phrasing instead of "{instrument} student"
|
| 204 |
+
- Natural, non-labeling language that encourages without pushing
|
| 205 |
+
|
| 206 |
+
### 4. AgentOrchestrator
|
| 207 |
+
|
| 208 |
+
Manages the workflow between agents and handles feedback loops.
|
| 209 |
+
|
| 210 |
+
**Location**: `agent_orchestrator.py`
|
| 211 |
+
|
| 212 |
+
**Responsibilities**:
|
| 213 |
+
- Coordinate GeneratorAgent and SecurityAgent
|
| 214 |
+
- Implement feedback loop (max 3 attempts)
|
| 215 |
+
- Manage rejection logging
|
| 216 |
+
- Return approved message or None after exhausting attempts
|
| 217 |
+
|
| 218 |
+
**Workflow**:
|
| 219 |
+
|
| 220 |
+
```
|
| 221 |
+
For each user:
|
| 222 |
+
For attempt in [1, 2, 3]:
|
| 223 |
+
1. GeneratorAgent.execute()
|
| 224 |
+
├─ Success? Continue
|
| 225 |
+
└─ Failure? Log & retry
|
| 226 |
+
|
| 227 |
+
2. SecurityAgent.execute()
|
| 228 |
+
├─ Approved? Return message
|
| 229 |
+
└─ Rejected? Log & provide feedback
|
| 230 |
+
|
| 231 |
+
3. Use feedback for next attempt
|
| 232 |
+
|
| 233 |
+
If all attempts fail:
|
| 234 |
+
Return None (user gets no message)
|
| 235 |
+
```
|
| 236 |
+
|
| 237 |
+
### 5. RejectionLogger
|
| 238 |
+
|
| 239 |
+
Logs all rejections to CSV files for evaluation and optimization.
|
| 240 |
+
|
| 241 |
+
**Location**: `rejection_logger.py`
|
| 242 |
+
|
| 243 |
+
**CSV Format**:
|
| 244 |
+
- `timestamp`: When the rejection occurred
|
| 245 |
+
- `user_id`: User identifier
|
| 246 |
+
- `attempt_number`: Which attempt (1, 2, or 3)
|
| 247 |
+
- `rejection_reason`: Brief reason for rejection
|
| 248 |
+
- `validation_type`: rule_based or llm_based
|
| 249 |
+
- `detailed_feedback`: Detailed feedback for improvement
|
| 250 |
+
- `generated_header`: The rejected header
|
| 251 |
+
- `generated_message`: The rejected message
|
| 252 |
+
- `header_length`: Header character count
|
| 253 |
+
- `message_length`: Message character count
|
| 254 |
+
- `model_used`: LLM model that generated the message
|
| 255 |
+
- `prompt_excerpt`: First 200 chars of prompt
|
| 256 |
+
- `recommendation_info`: Content recommendation details
|
| 257 |
+
- `has_instructions`: Whether instructions were provided
|
| 258 |
+
- `has_examples`: Whether examples were provided
|
| 259 |
+
|
| 260 |
+
**File Naming**: `{brand}_{campaign_name}_stage{stage}_{timestamp}_rejections.csv`
|
| 261 |
+
|
| 262 |
+
**Storage Location**: `ai_messaging_system_v2/logs/rejections/`
|
| 263 |
+
|
| 264 |
+
**Use Cases**:
|
| 265 |
+
- Evaluate LLM performance
|
| 266 |
+
- Identify common rejection patterns
|
| 267 |
+
- Optimize prompts and instructions
|
| 268 |
+
- Track improvement over time
|
| 269 |
+
- A/B testing different strategies
|
| 270 |
+
|
| 271 |
+
## Usage
|
| 272 |
+
|
| 273 |
+
### Basic Usage
|
| 274 |
+
|
| 275 |
+
The agentic workflow is enabled by default in `MessageGenerator`:
|
| 276 |
+
|
| 277 |
+
```python
|
| 278 |
+
from Messaging_system.Permes import Permes
|
| 279 |
+
|
| 280 |
+
permes = Permes()
|
| 281 |
+
users_df = permes.create_personalize_messages(
|
| 282 |
+
session=session,
|
| 283 |
+
users=users,
|
| 284 |
+
brand="drumeo",
|
| 285 |
+
config_file=system_config,
|
| 286 |
+
stage=1,
|
| 287 |
+
campaign_name="re_engagement",
|
| 288 |
+
campaign_instructions="Keep messages encouraging",
|
| 289 |
+
per_message_instructions="Focus on the recommended content"
|
| 290 |
+
)
|
| 291 |
+
```
|
| 292 |
+
|
| 293 |
+
### Switching to Legacy Mode
|
| 294 |
+
|
| 295 |
+
To disable the agentic workflow and use the legacy single-step generation:
|
| 296 |
+
|
| 297 |
+
```python
|
| 298 |
+
from Messaging_system.Message_generator import MessageGenerator
|
| 299 |
+
|
| 300 |
+
message_generator = MessageGenerator(core_config)
|
| 301 |
+
message_generator.use_agentic_workflow = False # Disable agents
|
| 302 |
+
core_config = message_generator.generate_messages(step=1)
|
| 303 |
+
```
|
| 304 |
+
|
| 305 |
+
### Accessing Rejection Logs
|
| 306 |
+
|
| 307 |
+
After generation, rejection logs are automatically saved:
|
| 308 |
+
|
| 309 |
+
```python
|
| 310 |
+
# Log location is printed in console output
|
| 311 |
+
# Example: ai_messaging_system_v2/logs/rejections/drumeo_re_engagement_stage1_20250130_143022_rejections.csv
|
| 312 |
+
|
| 313 |
+
import pandas as pd
|
| 314 |
+
|
| 315 |
+
# Read rejection log
|
| 316 |
+
rejections = pd.read_csv("path/to/rejection_log.csv")
|
| 317 |
+
|
| 318 |
+
# Analyze common rejection reasons
|
| 319 |
+
print(rejections["rejection_reason"].value_counts())
|
| 320 |
+
|
| 321 |
+
# Filter by validation type
|
| 322 |
+
rule_based = rejections[rejections["validation_type"] == "rule_based"]
|
| 323 |
+
llm_based = rejections[rejections["validation_type"] == "llm_based"]
|
| 324 |
+
```
|
| 325 |
+
|
| 326 |
+
## Configuration
|
| 327 |
+
|
| 328 |
+
### System Configuration
|
| 329 |
+
|
| 330 |
+
Managed in `configs/system/system_config.py`:
|
| 331 |
+
|
| 332 |
+
```python
|
| 333 |
+
SYSTEM_CONFIG = {
|
| 334 |
+
"header_limit": 30,
|
| 335 |
+
"message_limit": 110,
|
| 336 |
+
"AI_Jargon": ["elevate", "enhance", "ignite", ...],
|
| 337 |
+
"AI_phrases_drumeo": [...],
|
| 338 |
+
"AI_phrases_pianote": [...],
|
| 339 |
+
...
|
| 340 |
+
}
|
| 341 |
+
```
|
| 342 |
+
|
| 343 |
+
### Campaign Configuration
|
| 344 |
+
|
| 345 |
+
Instructions can be set at two levels:
|
| 346 |
+
|
| 347 |
+
1. **Campaign-Wide Instructions** (apply to all stages):
|
| 348 |
+
```python
|
| 349 |
+
CAMPAIGNS = {
|
| 350 |
+
"re_engagement": {
|
| 351 |
+
"campaign_instructions": "Keep messages encouraging and upbeat",
|
| 352 |
+
...
|
| 353 |
+
}
|
| 354 |
+
}
|
| 355 |
+
```
|
| 356 |
+
|
| 357 |
+
2. **Per-Message Instructions** (stage-specific):
|
| 358 |
+
```python
|
| 359 |
+
"1": {
|
| 360 |
+
"stage": 1,
|
| 361 |
+
"instructions": "Focus on the recommended content",
|
| 362 |
+
...
|
| 363 |
+
}
|
| 364 |
+
```
|
| 365 |
+
|
| 366 |
+
## Adding New Agents
|
| 367 |
+
|
| 368 |
+
To add a new agent to the system:
|
| 369 |
+
|
| 370 |
+
1. **Create Agent Class**:
|
| 371 |
+
```python
|
| 372 |
+
from .base_agent import BaseAgent
|
| 373 |
+
|
| 374 |
+
class MyNewAgent(BaseAgent):
|
| 375 |
+
def __init__(self, core_config):
|
| 376 |
+
super().__init__(name="MyNewAgent", core_config=core_config)
|
| 377 |
+
|
| 378 |
+
def execute(self, context):
|
| 379 |
+
# Implement agent logic
|
| 380 |
+
return {
|
| 381 |
+
"success": True,
|
| 382 |
+
"data": {...},
|
| 383 |
+
"error": None
|
| 384 |
+
}
|
| 385 |
+
```
|
| 386 |
+
|
| 387 |
+
2. **Update Agent Orchestrator**:
|
| 388 |
+
```python
|
| 389 |
+
# In agent_orchestrator.py
|
| 390 |
+
self.my_new_agent = MyNewAgent(core_config)
|
| 391 |
+
|
| 392 |
+
# Add to workflow
|
| 393 |
+
result = self.my_new_agent.execute(context)
|
| 394 |
+
```
|
| 395 |
+
|
| 396 |
+
3. **Update `__init__.py`**:
|
| 397 |
+
```python
|
| 398 |
+
from .my_new_agent import MyNewAgent
|
| 399 |
+
|
| 400 |
+
__all__ = [
|
| 401 |
+
...
|
| 402 |
+
"MyNewAgent"
|
| 403 |
+
]
|
| 404 |
+
```
|
| 405 |
+
|
| 406 |
+
## Performance Considerations
|
| 407 |
+
|
| 408 |
+
### Parallel Processing
|
| 409 |
+
|
| 410 |
+
The agentic workflow maintains the parallel processing architecture:
|
| 411 |
+
- User chunks are processed in parallel at the top level
|
| 412 |
+
- Within each chunk, agents run sequentially for each user
|
| 413 |
+
- This balances throughput with quality control
|
| 414 |
+
|
| 415 |
+
### Latency
|
| 416 |
+
|
| 417 |
+
- **Rule-based validation**: < 1ms per message
|
| 418 |
+
- **LLM-based validation**: ~1-2 seconds per message
|
| 419 |
+
- **Total per user** (with 1 attempt): ~2-4 seconds
|
| 420 |
+
- **Total per user** (with 3 attempts): ~6-12 seconds
|
| 421 |
+
|
| 422 |
+
### Cost Optimization
|
| 423 |
+
|
| 424 |
+
- Rule-based checks eliminate most invalid messages before LLM validation
|
| 425 |
+
- Feedback loop reduces wasted generations
|
| 426 |
+
- Rejection logging helps identify and fix systematic issues
|
| 427 |
+
|
| 428 |
+
## Monitoring & Debugging
|
| 429 |
+
|
| 430 |
+
### Logging
|
| 431 |
+
|
| 432 |
+
All agents log their activities:
|
| 433 |
+
|
| 434 |
+
```python
|
| 435 |
+
import logging
|
| 436 |
+
logger = logging.getLogger()
|
| 437 |
+
logger.setLevel(logging.INFO)
|
| 438 |
+
```
|
| 439 |
+
|
| 440 |
+
### Rejection Statistics
|
| 441 |
+
|
| 442 |
+
After each run, check rejection stats:
|
| 443 |
+
|
| 444 |
+
```python
|
| 445 |
+
# Automatically logged at the end of generation
|
| 446 |
+
# Example output:
|
| 447 |
+
# Rejection stats: {
|
| 448 |
+
# 'total_rejections': 15,
|
| 449 |
+
# 'rule_based_rejections': 10,
|
| 450 |
+
# 'llm_based_rejections': 5,
|
| 451 |
+
# 'by_attempt': {1: 8, 2: 5, 3: 2},
|
| 452 |
+
# 'common_reasons': {
|
| 453 |
+
# 'Message exceeds limit': 6,
|
| 454 |
+
# 'Contains banned phrase': 4,
|
| 455 |
+
# ...
|
| 456 |
+
# }
|
| 457 |
+
# }
|
| 458 |
+
```
|
| 459 |
+
|
| 460 |
+
## Best Practices
|
| 461 |
+
|
| 462 |
+
1. **Provide Clear Instructions**: The more specific your instructions, the better the results
|
| 463 |
+
2. **Use Examples**: Examples help the agent understand your brand voice
|
| 464 |
+
3. **Monitor Rejections**: Regularly review rejection logs to identify issues
|
| 465 |
+
4. **Iterate on Prompts**: Use rejection feedback to improve campaign instructions
|
| 466 |
+
5. **Test in Stages**: Test with small user samples before full campaigns
|
| 467 |
+
6. **Balance Quality & Speed**: Consider whether 3 attempts is optimal for your use case
|
| 468 |
+
|
| 469 |
+
## Troubleshooting
|
| 470 |
+
|
| 471 |
+
### High Rejection Rate
|
| 472 |
+
|
| 473 |
+
- Check rejection logs for common patterns
|
| 474 |
+
- Review campaign and per-message instructions
|
| 475 |
+
- Ensure examples match your brand voice
|
| 476 |
+
- Verify character limits are achievable
|
| 477 |
+
|
| 478 |
+
### LLM Validation Failures
|
| 479 |
+
|
| 480 |
+
- Check LLM API connectivity
|
| 481 |
+
- Review validation prompt logic in SecurityAgent
|
| 482 |
+
- Consider adjusting validation criteria
|
| 483 |
+
|
| 484 |
+
### No Messages Generated
|
| 485 |
+
|
| 486 |
+
- Check if all attempts are being rejected
|
| 487 |
+
- Review rejection logs for the reason
|
| 488 |
+
- Verify user data quality
|
| 489 |
+
- Check for empty instructions or examples
|
| 490 |
+
|
| 491 |
+
## Future Enhancements
|
| 492 |
+
|
| 493 |
+
Potential additions to the agentic system:
|
| 494 |
+
|
| 495 |
+
1. **Performance Agent**: Analyzes engagement metrics and optimizes messages
|
| 496 |
+
2. **A/B Testing Agent**: Generates variations for testing
|
| 497 |
+
3. **Brand Consistency Agent**: Ensures messages align with brand guidelines
|
| 498 |
+
4. **Personalization Scorer**: Rates personalization quality
|
| 499 |
+
5. **Multi-Language Agent**: Handles translation and localization
|
| 500 |
+
6. **Content Safety Agent**: Additional safety checks for sensitive content
|
| 501 |
+
|
| 502 |
+
## Contributing
|
| 503 |
+
|
| 504 |
+
When modifying the agentic workflow:
|
| 505 |
+
|
| 506 |
+
1. Maintain the BaseAgent interface for new agents
|
| 507 |
+
2. Update this README with any changes
|
| 508 |
+
3. Add tests for new agents
|
| 509 |
+
4. Document new configuration options
|
| 510 |
+
5. Update rejection logging schema if needed
|
| 511 |
+
|
| 512 |
+
## License
|
| 513 |
+
|
| 514 |
+
Proprietary to Musora Media Inc.
|
| 515 |
+
|
| 516 |
+
## Contact
|
| 517 |
+
|
| 518 |
+
For questions or contributions: [email protected]
|
ai_messaging_system_v2/Messaging_system/agents/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agentic Workflow System for AI Messaging
|
| 3 |
+
|
| 4 |
+
This package contains the multi-agent system for generating and validating
|
| 5 |
+
personalized messages with enhanced quality control.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from .base_agent import BaseAgent
|
| 9 |
+
from .generator_agent import GeneratorAgent
|
| 10 |
+
from .security_agent import SecurityAgent
|
| 11 |
+
from .agent_orchestrator import AgentOrchestrator
|
| 12 |
+
from .rejection_logger import RejectionLogger
|
| 13 |
+
|
| 14 |
+
__all__ = [
|
| 15 |
+
"BaseAgent",
|
| 16 |
+
"GeneratorAgent",
|
| 17 |
+
"SecurityAgent",
|
| 18 |
+
"AgentOrchestrator",
|
| 19 |
+
"RejectionLogger"
|
| 20 |
+
]
|
ai_messaging_system_v2/Messaging_system/agents/agent_orchestrator.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agent Orchestrator
|
| 3 |
+
|
| 4 |
+
Manages the agentic workflow between GeneratorAgent and SecurityAgent.
|
| 5 |
+
Handles feedback loops, retry logic, and rejection logging.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import json
|
| 9 |
+
from typing import Dict, Any, Optional
|
| 10 |
+
import logging
|
| 11 |
+
import pandas as pd
|
| 12 |
+
from .base_agent import BaseAgent
|
| 13 |
+
from .generator_agent import GeneratorAgent
|
| 14 |
+
from .security_agent import SecurityAgent
|
| 15 |
+
from .rejection_logger import RejectionLogger
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class AgentOrchestrator:
    """
    Orchestrates the agentic workflow for message generation and validation.

    Workflow:
        1. GeneratorAgent generates a message.
        2. SecurityAgent validates the message.
        3. If rejected, feedback is sent back to GeneratorAgent.
        4. The process repeats up to ``max_attempts`` (default 3) attempts.
        5. All rejections are logged for later evaluation.
    """

    # Class-level logger: shared by all instances and available even on
    # instances constructed without running __init__ (e.g. in tests).
    _logger = logging.getLogger(__name__)

    def __init__(self, core_config: Any, rejection_logger: Optional["RejectionLogger"] = None):
        """
        Initialize the Agent Orchestrator.

        Args:
            core_config: CoreConfig instance shared by all agents.
            rejection_logger: Optional RejectionLogger instance; when None,
                rejections are not persisted.
        """
        self.core_config = core_config
        self.rejection_logger = rejection_logger

        # Initialize agents
        self.generator_agent = GeneratorAgent(core_config)
        self.security_agent = SecurityAgent(core_config)

        self.max_attempts = 3

    def _log_rejection(self, user: pd.Series, user_id: Any, attempt: int,
                       reason: str, validation_type: str, feedback: str,
                       header: str, message: str, prompt: str) -> None:
        """Persist one rejection record if a rejection logger is configured.

        Shared by the generation-failure and validation-rejection paths so
        both log the same fields (including the 200-char truncation of
        recommendation_info, which was previously applied on only one path).
        """
        if not self.rejection_logger:
            return
        self.rejection_logger.log_rejection(
            user_id=str(user_id),
            attempt_number=attempt,
            rejection_reason=reason,
            validation_type=validation_type,
            detailed_feedback=feedback,
            generated_header=header,
            generated_message=message,
            model_used=self.core_config.model,
            prompt=prompt,
            recommendation_info=str(user.get("recommendation_info", ""))[:200],
            has_instructions=bool(self.core_config.campaign_instructions or self.core_config.per_message_instructions),
            has_examples=bool(self.core_config.sample_example),
        )

    def generate_and_validate_message(self, user: pd.Series, stage: int) -> Optional[Dict[str, Any]]:
        """
        Generate and validate a message for a user with a feedback loop.

        Args:
            user: User data (pandas Series).
            stage: Campaign stage number.

        Returns:
            Approved message dict or None if all attempts failed:
            {
                "header": str,
                "message": str,
                "metadata": {"attempts": int, "model": str, "stage": int, "approved": True}
            }
        """
        user_id = user.get("user_id", user.get("USER_ID", "unknown"))
        feedback = None

        for attempt in range(1, self.max_attempts + 1):
            self._logger.info(f"[User {user_id}] Attempt {attempt}/{self.max_attempts}")

            # Step 1: Generate message (feedback from the previous rejection,
            # if any, is passed through so the generator can correct itself).
            gen_context = {
                "user": user,
                "stage": stage,
                "attempt": attempt,
                "feedback": feedback,
            }
            gen_result = self.generator_agent.execute(gen_context)

            if not gen_result["success"]:
                error_text = gen_result.get("error") or "Unknown error"
                self._logger.warning(
                    f"[User {user_id}] Generation failed on attempt {attempt}: {error_text}"
                )
                # Log this as a generation failure (no header/message exists yet,
                # and the prompt is unknown because generation itself failed).
                self._log_rejection(
                    user, user_id, attempt,
                    reason="Generation failed",
                    validation_type="generation_error",
                    feedback=error_text,
                    header="", message="", prompt="",
                )
                # If generation fails, try again
                continue

            # Extract generated content
            header = gen_result["data"]["header"]
            message = gen_result["data"]["message"]
            prompt = gen_result["data"].get("prompt", "")

            # Step 2: Validate message
            val_context = {
                "header": header,
                "message": message,
                "user": user,
                "prompt": prompt,
                "attempt": attempt,
            }
            val_result = self.security_agent.execute(val_context)

            # Use .get() defensively: when the validator call itself fails
            # (success=False) its "data" dict may be missing or incomplete.
            # The previous direct indexing raised KeyError here and aborted
            # the whole batch instead of retrying.
            val_data = val_result.get("data") or {}

            if val_result["success"] and val_data.get("approved"):
                self._logger.info(f"[User {user_id}] Message approved on attempt {attempt}")
                return {
                    "header": header,
                    "message": message,
                    "metadata": {
                        "attempts": attempt,
                        "model": self.core_config.model,
                        "stage": stage,
                        "approved": True,
                    },
                }

            # Message was rejected (or validation errored out).
            rejection_reason = val_data.get("rejection_reason", "Validation failed")
            detailed_feedback = val_data.get("detailed_feedback", "")
            validation_type = val_data.get("validation_type", "unknown")

            self._logger.info(f"[User {user_id}] Message rejected on attempt {attempt}: {rejection_reason}")

            self._log_rejection(
                user, user_id, attempt,
                reason=rejection_reason,
                validation_type=validation_type,
                feedback=detailed_feedback,
                header=header, message=message, prompt=prompt,
            )

            # Carry the feedback into the next attempt, if there is one.
            if attempt < self.max_attempts:
                feedback = detailed_feedback
                self._logger.info(f"[User {user_id}] Retrying with feedback: {feedback}")
            else:
                self._logger.warning(f"[User {user_id}] Max attempts ({self.max_attempts}) reached. Giving up.")

        # All attempts failed
        self._logger.warning(f"[User {user_id}] Failed to generate valid message after {self.max_attempts} attempts")
        return None

    def process_batch(self, users_df: pd.DataFrame, stage: int) -> pd.DataFrame:
        """
        Process a batch of users through the agentic workflow.

        Args:
            users_df: DataFrame of users.
            stage: Campaign stage number.

        Returns:
            DataFrame with generated messages and metadata. Users for whom no
            valid message could be produced get None header/message and an
            "approved": False metadata entry.
        """
        self._logger.info(f"Processing batch of {len(users_df)} users through agentic workflow")

        results = []
        for _, user in users_df.iterrows():
            result = self.generate_and_validate_message(user, stage)
            user_data = user.to_dict()
            if result:
                user_data["header"] = result["header"]
                user_data["message"] = result["message"]
                user_data["metadata"] = result["metadata"]
            else:
                # Failed to generate a valid message after all attempts.
                user_data["header"] = None
                user_data["message"] = None
                user_data["metadata"] = {"attempts": self.max_attempts, "approved": False}
            results.append(user_data)

        results_df = pd.DataFrame(results)

        successful = sum(1 for r in results if r["message"] is not None)
        self._logger.info(f"Batch processing complete: {successful}/{len(users_df)} messages generated successfully")

        return results_df

    def set_rejection_logger(self, rejection_logger: "RejectionLogger") -> None:
        """Set the rejection logger."""
        self.rejection_logger = rejection_logger

    def get_stats(self) -> Dict[str, Any]:
        """
        Get statistics about the orchestration process.

        Returns:
            Dictionary with rejection stats and the rejection-log path;
            empty when no rejection logger is configured.
        """
        stats: Dict[str, Any] = {}
        if self.rejection_logger:
            stats["rejection_stats"] = self.rejection_logger.get_rejection_stats()
            stats["rejection_log_path"] = str(self.rejection_logger.get_log_path())
        return stats
|
ai_messaging_system_v2/Messaging_system/agents/base_agent.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Base Agent Abstract Class
|
| 3 |
+
|
| 4 |
+
Provides the foundation for all agents in the agentic workflow system.
|
| 5 |
+
All agents must inherit from this class to ensure consistency and scalability.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from abc import ABC, abstractmethod
|
| 9 |
+
from typing import Dict, Any, Optional
|
| 10 |
+
import logging
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class BaseAgent(ABC):
    """
    Abstract base class for all agents in the messaging system.

    Every agent implements the same ``execute(context) -> result`` contract,
    which keeps the workflow uniform and makes new agents cheap to add.
    """

    def __init__(self, name: str, core_config: Any):
        """
        Initialize the base agent.

        Args:
            name: The name of the agent (e.g., "GeneratorAgent", "SecurityAgent")
            core_config: CoreConfig instance containing system configuration
        """
        self.name = name
        self.core_config = core_config
        # Child logger namespaced per agent so log lines are attributable.
        self.logger = logging.getLogger(f"{__name__}.{name}")

    @abstractmethod
    def execute(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Run the agent's main task.

        Args:
            context: Dictionary containing all necessary context for the agent,
                     including user data, prompts, messages, feedback, etc.

        Returns:
            Dictionary containing the agent's output and status:
            {
                "success": bool,
                "data": Any,
                "error": Optional[str],
                "metadata": Optional[Dict]
            }
        """
        pass

    def log_info(self, message: str):
        """Log informational message."""
        self.logger.info(f"[{self.name}] {message}")

    def log_warning(self, message: str):
        """Log warning message."""
        self.logger.warning(f"[{self.name}] {message}")

    def log_error(self, message: str):
        """Log error message."""
        self.logger.error(f"[{self.name}] {message}")

    def validate_context(self, context: Dict[str, Any], required_keys: list) -> bool:
        """
        Check that the context contains every required key.

        Args:
            context: The context dictionary to validate
            required_keys: List of required keys

        Returns:
            True if all required keys are present, False otherwise
        """
        missing_keys = [k for k in required_keys if k not in context]
        if not missing_keys:
            return True
        self.log_error(f"Missing required keys in context: {missing_keys}")
        return False
|
ai_messaging_system_v2/Messaging_system/agents/generator_agent.py
ADDED
|
@@ -0,0 +1,470 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Generator Agent
|
| 3 |
+
|
| 4 |
+
Handles prompt generation and message creation using cutting-edge prompt engineering.
|
| 5 |
+
Adapts prompts based on whether instructions and/or examples are provided.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import json
|
| 9 |
+
import pandas as pd
|
| 10 |
+
from typing import Dict, Any, Optional
|
| 11 |
+
from .base_agent import BaseAgent
|
| 12 |
+
from ..LLM import LLM
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class GeneratorAgent(BaseAgent):
    """
    Generator Agent - Creates personalized messages with advanced prompt engineering.

    This agent:
    - Generates prompts with different strategies based on instructions vs examples
    - Creates personalized headers and messages using LLM
    - Handles feedback from SecurityAgent for regeneration attempts
    - Supports conditional prompt injection (e.g., birthday reminders)
    """

    def __init__(self, core_config: Any):
        """
        Initialize the Generator Agent.

        Args:
            core_config: CoreConfig instance
        """
        super().__init__(name="GeneratorAgent", core_config=core_config)
        # LLM wrapper shared by all generation calls for this agent.
        self.llm = LLM(core_config)

    def execute(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute message generation.

        Args:
            context: {
                "user": pd.Series - user data row
                "stage": int - campaign stage
                "feedback": Optional[str] - feedback from previous rejection
                "attempt": int - current attempt number (1-3)
            }

        Returns:
            {
                "success": bool,
                "data": {
                    "header": str,
                    "message": str,
                    "prompt": str  # The prompt used
                },
                "error": Optional[str],
                "metadata": {
                    "tokens_used": int,
                    "model": str
                }
            }
        """
        # Validate required context
        if not self.validate_context(context, ["user", "stage", "attempt"]):
            return {
                "success": False,
                "data": None,
                "error": "Missing required context keys"
            }

        user = context["user"]
        stage = context["stage"]
        attempt = context.get("attempt", 1)
        feedback = context.get("feedback", None)

        try:
            # Generate prompt with enhanced logic
            prompt = self._generate_enhanced_prompt(user, stage, feedback)

            # Get LLM instructions
            instructions = self._get_llm_instructions(feedback)

            # Generate message
            response = self.llm.get_response(prompt=prompt, instructions=instructions)

            if response is None:
                return {
                    "success": False,
                    "data": None,
                    "error": "LLM returned no response"
                }

            # Validate response structure
            if not isinstance(response, dict) or "header" not in response or "message" not in response:
                return {
                    "success": False,
                    "data": None,
                    "error": "Invalid response structure from LLM"
                }

            return {
                "success": True,
                "data": {
                    "header": response.get("header", ""),
                    "message": response.get("message", ""),
                    "prompt": prompt
                },
                "error": None,
                # NOTE(review): the docstring above promises "tokens_used" in
                # metadata, but only model/attempt are returned here — confirm
                # what downstream consumers actually read.
                "metadata": {
                    "model": self.core_config.model,
                    "attempt": attempt
                }
            }

        except Exception as e:
            self.log_error(f"Error generating message: {str(e)}")
            return {
                "success": False,
                "data": None,
                "error": str(e)
            }

    def _generate_enhanced_prompt(self, user: pd.Series, stage: int, feedback: Optional[str] = None) -> str:
        """
        Generate enhanced prompt based on instructions vs examples logic.

        Args:
            user: User data
            stage: Campaign stage
            feedback: Feedback from SecurityAgent (if regenerating)

        Returns:
            Formatted prompt string
        """
        # Determine if we have instructions and/or examples
        has_campaign_instructions = bool(self.core_config.campaign_instructions)
        has_message_instructions = bool(self.core_config.per_message_instructions)
        has_instructions = has_campaign_instructions or has_message_instructions
        has_examples = bool(self.core_config.sample_example)

        # Build prompt components: stage 1 is the first touch; later stages
        # must also account for previously sent messages.
        if stage == 1:
            prompt = self._build_initial_message_prompt(user, has_instructions, has_examples, feedback)
        else:
            prompt = self._build_followup_message_prompt(user, has_instructions, has_examples, feedback)

        return prompt

    def _build_initial_message_prompt(self, user: pd.Series, has_instructions: bool,
                                      has_examples: bool, feedback: Optional[str]) -> str:
        """
        Build prompt for initial message (stage 1).

        Implements different strategies based on instructions vs examples:
        - Instructions + Examples: Prioritize instructions, use examples for style/voice
        - Only Examples: Use examples to understand vocabulary, style, voice, then personalize
        """
        # Start with input context
        input_context = self._get_input_context(has_instructions, has_examples, feedback)

        # Add examples if available
        example_section = self._get_example_section(has_instructions, has_examples)

        # Add user profile
        user_profile = self._get_user_profile(user)

        # Add conditional prompts (birthday, etc.)
        conditional_prompts = self._get_conditional_prompts(user)

        # Add instructions if available
        instructions_section = self._get_instructions_section(has_instructions, has_examples)

        # Add recommendation instructions
        recommendation_section = self._get_recommendation_instructions(user)

        # Add output instructions
        output_section = self._get_output_instructions()

        # Assemble the prompt
        prompt = f"""
{input_context}

{example_section}

{user_profile}

{conditional_prompts}

{instructions_section}

{recommendation_section}

{output_section}
"""

        return prompt.strip()

    def _build_followup_message_prompt(self, user: pd.Series, has_instructions: bool,
                                       has_examples: bool, feedback: Optional[str]) -> str:
        """
        Build prompt for follow-up message (stages 2-11).

        Includes previous message history to avoid repetition.
        """
        # Get previous messages
        # NOTE(review): assumes the DataCollector populated "previous_messages"
        # for non-initial stages — confirm against the batch pipeline.
        previous_messages_str = str(user.get("previous_messages", ""))

        # Start with context about previous messages
        previous_context = f"""
We have previously sent these push notifications to the user and the user has not re-engaged yet:

**Previous Messages:**
{previous_messages_str}

**CRITICAL**: The new message must be different from previous messages. Avoid using similar words, phrases, and vocabulary from the previous sends. Check the last 2 messages especially carefully.
(Focus on structure/tone, not just words. "Ready to sing?" vs "Ready to practice?" = TOO SIMILAR)
"""

        # Build the rest similar to initial message
        input_context = self._get_input_context(has_instructions, has_examples, feedback)
        example_section = self._get_example_section(has_instructions, has_examples)
        user_profile = self._get_user_profile(user)
        conditional_prompts = self._get_conditional_prompts(user)
        instructions_section = self._get_instructions_section(has_instructions, has_examples)
        recommendation_section = self._get_recommendation_instructions(user)
        output_section = self._get_output_instructions()

        prompt = f"""
{previous_context}

{input_context}

{example_section}

{user_profile}

{conditional_prompts}

{instructions_section}

{recommendation_section}

{output_section}
"""

        return prompt.strip()

    def _get_input_context(self, has_instructions: bool, has_examples: bool,
                           feedback: Optional[str]) -> str:
        """Get the input context based on instructions/examples availability."""

        # Add feedback if this is a regeneration attempt
        feedback_section = ""
        if feedback:
            feedback_section = f"""
**IMPORTANT - Previous Attempt Feedback:**
{feedback}

Please address the feedback above in this new attempt.
"""

        if has_instructions:
            context = f"""
Your task is to generate a personalized 'header' and 'message' as a push notification for a student learning {self.core_config.get_instrument()}.

You should follow the instructions provided and use the examples (if available) to understand our brand's voice, style, and tone.

**Character Limits**: header < {self.core_config.config_file["header_limit"]} and message < {self.core_config.config_file["message_limit"]} characters.

{feedback_section}
"""
        elif has_examples:
            # Only examples, no instructions
            context = f"""
Your task is to generate ONE personalized 'header' and 'message' as a push notification for a student learning {self.core_config.get_instrument()}.

**IMPORTANT**: Carefully analyze the example messages provided below to understand our brand's:
- Vocabulary and word choices
- Writing style and tone for header and message
- Voice and personality
- Sentence structure and rhythm for header and message
- Character and feel

Stick to the vocabulary and style. Select one of the messages, Modify the example slightly to make it more personalized giving the instructions and provide one modified personalized 'header' and 'message' as the output.

**Character Limits**: header < {self.core_config.config_file["header_limit"]} and message < {self.core_config.config_file["message_limit"]} characters.

{feedback_section}
"""
        else:
            # No instructions, no examples (fallback)
            context = f"""
Your task is to generate a personalized 'header' and 'message' as a push notification for a student learning {self.core_config.get_instrument()}.

**Character Limits**: header < {self.core_config.config_file["header_limit"]} and message < {self.core_config.config_file["message_limit"]} characters.

{feedback_section}
"""

        return context.strip()

    def _get_example_section(self, has_instructions: bool, has_examples: bool) -> str:
        """Get the examples section with appropriate framing."""
        if not self.core_config.sample_example:
            return ""

        if has_instructions and has_examples:
            # Both: examples demonstrate style/voice
            header = "### **Example Messages (demonstrating our brand's voice and style):**"
        elif has_examples and not has_instructions:
            # Only examples: analyze these to understand brand
            header = "### **Example Messages:**\nStudy these examples, and stick to the vocabulary and style. Select one of them, Modify the example slightly to make it more personalized giving the instructions and provide one modified personalized 'header' and 'message' as the output."
        else:
            header = "### **Example Messages:**"

        return f"""
{header}

{self.core_config.sample_example}
"""

    def _get_user_profile(self, user: pd.Series) -> str:
        """Get user profile section."""
        user_info = f"""
### **User Information - Use to Personalize the Message:**

- The user is learning {self.core_config.get_instrument()}.
- {self.core_config.segment_info if self.core_config.segment_info else ""}
- User profile (use indirectly to improve personalization):
{user.get("user_info", "Not available")}
"""
        return user_info.strip()

    def _get_conditional_prompts(self, user: pd.Series) -> str:
        """
        Get conditional prompt injections (e.g., birthday reminders).

        Returns:
            Conditional prompt section or empty string
        """
        conditional_sections = []

        # Birthday reminder
        # NOTE(review): pd.notna on a list/dict value returns an array, which
        # would make this `and` ambiguous — assumed scalar here; confirm the
        # upstream data type of "birthday_reminder".
        if pd.notna(user.get("birthday_reminder")) and user.get("birthday_reminder") not in [None, [], {}]:
            days_until = user.get("birthday_reminder")
            birthday_prompt = f"""
### **Special Note - Birthday Reminder:**
The user has a birthday coming up in {days_until} days. Include a brief, friendly birthday mention in the message if it fits naturally.
"""
            conditional_sections.append(birthday_prompt.strip())

        # Future conditional prompts can be added here
        # First name
        if pd.notna(user.get("first_name")) and user.get("first_name") not in [None, [], {}]:
            name = user.get("first_name")
            name_prompt = f"""
User name is: {name} ; if the name is a valid name (e.g. not email, none sense), use the name in the 'header'. If we have used their name in our previous messages, DON'T USE IT AGAIN.
"""
            conditional_sections.append(name_prompt.strip())
        # Example: achievement milestones, streaks, etc.

        return "\n\n".join(conditional_sections) if conditional_sections else ""

    def _get_instructions_section(self, has_instructions: bool, has_examples: bool) -> str:
        """Get instructions section."""
        if not has_instructions:
            return ""

        instructions_parts = []

        # Campaign-wide instructions
        if self.core_config.campaign_instructions:
            instructions_parts.append(f"""### **Campaign Instructions:**
{self.core_config.campaign_instructions}""")

        # Per-message instructions
        if self.core_config.per_message_instructions:
            instructions_parts.append(f"""### **Additional Instructions for This Message:**
{self.core_config.per_message_instructions}""")

        if has_examples:
            instructions_parts.append("\nUse the example messages to understand our brand's voice and style while following these instructions.")

        return "\n\n".join(instructions_parts)

    def _get_recommendation_instructions(self, user: pd.Series) -> str:
        """Get content recommendation instructions."""
        if not self.core_config.involve_recsys_result:
            return ""

        recommendation_info = user.get("recommendation_info", "")
        if not recommendation_info:
            return ""

        instructions = f"""
### **Content Recommendation Guidelines:**

Below is the content we want to recommend to the user:

{recommendation_info}

When incorporating this content:
1. **Title Usage**: Refer to the content naturally - paraphrase or describe it, don't quote verbatim. Avoid promotional tone.
2. **Content Type**: Mention the type (course, workout, etc.) only if it flows naturally.
3. **Artist/Instructor**: If the full name is available, mention it casually if appropriate. If only first name, do NOT include it. NEVER assume or hallucinate names.
4. **Tone**: Keep it light, supportive, engaging, and personal.
5. **Time Reference**: NEVER use time-related words ("new," "recent," "latest") or imply recency.

Make the recommendation feel personalized and casually relevant, not generic or marketing-like.
"""
        return instructions.strip()

    def _get_output_instructions(self) -> str:
        """Get output format instructions."""
        instructions = f"""
### **Output Format:**

Return a valid JSON object with this exact structure:

{{
"header": "your generated header",
"message": "your generated message"
}}

**Remember**:
- header < {self.core_config.config_file["header_limit"]} characters
- message < {self.core_config.config_file["message_limit"]} characters
- Valid JSON format
"""
        return instructions.strip()

    def _get_llm_instructions(self, feedback: Optional[str]) -> str:
        """
        Get system-level instructions for the LLM.

        Args:
            feedback: Feedback from previous rejection (if any)

        Returns:
            System instructions string
        """
        # Global banned phrases plus brand-specific jargon (e.g. "AI_phrases_drumeo").
        banned_phrases = "\n".join(f"- {word}" for word in self.core_config.config_file.get("AI_Jargon", []))
        brand_jargon_key = f"AI_phrases_{self.core_config.brand}"
        jargon_list = "\n".join(f"- {word}" for word in self.core_config.config_file.get(brand_jargon_key, []))

        instructions = f"""
You are an expert at creating personalized, engaging push notifications for {self.core_config.get_instrument()} students.

**Critical Rules:**
- NEVER use time-related words ("new," "recent," "latest," etc.) or imply recency
- Stay within character limits: header < {self.core_config.config_file["header_limit"]}, message < {self.core_config.config_file["message_limit"]}
- Return valid JSON with "header" and "message" keys

**Avoid these phrases and similar variations:**
{banned_phrases}

**Also avoid these phrases:**
{jargon_list}

**Tone**: Supportive, engaging, personal - never pushy or promotional.
"""

        if feedback:
            instructions += f"""

**IMPORTANT**: Your previous attempt was rejected. Address this feedback:
{feedback}
"""

        return instructions.strip()
|
ai_messaging_system_v2/Messaging_system/agents/rejection_logger.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Rejection Logger
|
| 3 |
+
|
| 4 |
+
Logs all rejected messages to CSV files for evaluation and optimization purposes.
|
| 5 |
+
One CSV file is created per campaign run.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import csv
|
| 9 |
+
import os
|
| 10 |
+
from datetime import datetime
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Dict, Any, Optional
|
| 13 |
+
import logging
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class RejectionLogger:
|
| 19 |
+
"""
|
| 20 |
+
Handles logging of rejected messages to CSV files.
|
| 21 |
+
|
| 22 |
+
Creates one CSV file per campaign run containing all rejections
|
| 23 |
+
with detailed information for LLM evaluation and process optimization.
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
def __init__(self, campaign_name: str, brand: str, stage: int, base_dir: Optional[str] = None):
|
| 27 |
+
"""
|
| 28 |
+
Initialize the rejection logger.
|
| 29 |
+
|
| 30 |
+
Args:
|
| 31 |
+
campaign_name: Name of the campaign
|
| 32 |
+
brand: Brand name (drumeo, pianote, etc.)
|
| 33 |
+
stage: Campaign stage number
|
| 34 |
+
base_dir: Base directory for logs (defaults to ai_messaging_system_v2/logs/rejections/)
|
| 35 |
+
"""
|
| 36 |
+
self.campaign_name = campaign_name
|
| 37 |
+
self.brand = brand
|
| 38 |
+
self.stage = stage
|
| 39 |
+
|
| 40 |
+
# Set up the log directory
|
| 41 |
+
if base_dir is None:
|
| 42 |
+
# Default to logs/rejections/ within the project
|
| 43 |
+
project_root = Path(__file__).parent.parent.parent
|
| 44 |
+
base_dir = project_root / "logs" / "rejections"
|
| 45 |
+
else:
|
| 46 |
+
base_dir = Path(base_dir)
|
| 47 |
+
|
| 48 |
+
# Create directory if it doesn't exist
|
| 49 |
+
base_dir.mkdir(parents=True, exist_ok=True)
|
| 50 |
+
|
| 51 |
+
# Create unique filename with timestamp
|
| 52 |
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
| 53 |
+
filename = f"{brand}_{campaign_name}_stage{stage}_{timestamp}_rejections.csv"
|
| 54 |
+
self.log_file = base_dir / filename
|
| 55 |
+
|
| 56 |
+
# CSV headers
|
| 57 |
+
self.headers = [
|
| 58 |
+
"timestamp",
|
| 59 |
+
"user_id",
|
| 60 |
+
"attempt_number",
|
| 61 |
+
"rejection_reason",
|
| 62 |
+
"validation_type", # rule_based or llm_based
|
| 63 |
+
"detailed_feedback",
|
| 64 |
+
"generated_header",
|
| 65 |
+
"generated_message",
|
| 66 |
+
"header_length",
|
| 67 |
+
"message_length",
|
| 68 |
+
"model_used",
|
| 69 |
+
"prompt_excerpt", # First 200 chars of prompt for reference
|
| 70 |
+
"recommendation_info",
|
| 71 |
+
"has_instructions",
|
| 72 |
+
"has_examples"
|
| 73 |
+
]
|
| 74 |
+
|
| 75 |
+
# Initialize the CSV file with headers
|
| 76 |
+
self._initialize_csv()
|
| 77 |
+
|
| 78 |
+
logger.info(f"Rejection logger initialized: {self.log_file}")
|
| 79 |
+
|
| 80 |
+
def _initialize_csv(self):
|
| 81 |
+
"""Create the CSV file and write headers."""
|
| 82 |
+
with open(self.log_file, 'w', newline='', encoding='utf-8') as f:
|
| 83 |
+
writer = csv.DictWriter(f, fieldnames=self.headers)
|
| 84 |
+
writer.writeheader()
|
| 85 |
+
|
| 86 |
+
def log_rejection(self,
|
| 87 |
+
user_id: str,
|
| 88 |
+
attempt_number: int,
|
| 89 |
+
rejection_reason: str,
|
| 90 |
+
validation_type: str,
|
| 91 |
+
detailed_feedback: str,
|
| 92 |
+
generated_header: str,
|
| 93 |
+
generated_message: str,
|
| 94 |
+
model_used: str,
|
| 95 |
+
prompt: str = "",
|
| 96 |
+
recommendation_info: str = "",
|
| 97 |
+
has_instructions: bool = False,
|
| 98 |
+
has_examples: bool = False,
|
| 99 |
+
additional_data: Optional[Dict[str, Any]] = None):
|
| 100 |
+
"""
|
| 101 |
+
Log a rejected message to the CSV file.
|
| 102 |
+
|
| 103 |
+
Args:
|
| 104 |
+
user_id: ID of the user
|
| 105 |
+
attempt_number: Which attempt this was (1, 2, or 3)
|
| 106 |
+
rejection_reason: Brief reason for rejection
|
| 107 |
+
validation_type: "rule_based" or "llm_based"
|
| 108 |
+
detailed_feedback: Detailed feedback to help improve next attempt
|
| 109 |
+
generated_header: The header that was rejected
|
| 110 |
+
generated_message: The message that was rejected
|
| 111 |
+
model_used: LLM model that generated the message
|
| 112 |
+
prompt: The prompt used (optional, will be truncated)
|
| 113 |
+
recommendation_info: Content recommendation info (optional)
|
| 114 |
+
has_instructions: Whether instructions were provided
|
| 115 |
+
has_examples: Whether examples were provided
|
| 116 |
+
additional_data: Any additional data to log (optional)
|
| 117 |
+
"""
|
| 118 |
+
try:
|
| 119 |
+
row_data = {
|
| 120 |
+
"timestamp": datetime.now().isoformat(),
|
| 121 |
+
"user_id": user_id,
|
| 122 |
+
"attempt_number": attempt_number,
|
| 123 |
+
"rejection_reason": rejection_reason,
|
| 124 |
+
"validation_type": validation_type,
|
| 125 |
+
"detailed_feedback": detailed_feedback,
|
| 126 |
+
"generated_header": generated_header,
|
| 127 |
+
"generated_message": generated_message,
|
| 128 |
+
"header_length": len(generated_header) if generated_header else 0,
|
| 129 |
+
"message_length": len(generated_message) if generated_message else 0,
|
| 130 |
+
"model_used": model_used,
|
| 131 |
+
"prompt_excerpt": prompt[:200] if prompt else "",
|
| 132 |
+
"recommendation_info": recommendation_info,
|
| 133 |
+
"has_instructions": has_instructions,
|
| 134 |
+
"has_examples": has_examples
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
# Add any additional data
|
| 138 |
+
if additional_data:
|
| 139 |
+
row_data.update(additional_data)
|
| 140 |
+
|
| 141 |
+
# Write to CSV
|
| 142 |
+
with open(self.log_file, 'a', newline='', encoding='utf-8') as f:
|
| 143 |
+
writer = csv.DictWriter(f, fieldnames=self.headers, extrasaction='ignore')
|
| 144 |
+
writer.writerow(row_data)
|
| 145 |
+
|
| 146 |
+
logger.debug(f"Logged rejection for user {user_id}, attempt {attempt_number}")
|
| 147 |
+
|
| 148 |
+
except Exception as e:
|
| 149 |
+
logger.error(f"Error logging rejection: {str(e)}")
|
| 150 |
+
|
| 151 |
+
def get_log_path(self) -> Path:
|
| 152 |
+
"""Return the path to the log file."""
|
| 153 |
+
return self.log_file
|
| 154 |
+
|
| 155 |
+
def get_rejection_count(self) -> int:
|
| 156 |
+
"""
|
| 157 |
+
Get the total number of rejections logged.
|
| 158 |
+
|
| 159 |
+
Returns:
|
| 160 |
+
Number of rejections (excluding header row)
|
| 161 |
+
"""
|
| 162 |
+
try:
|
| 163 |
+
with open(self.log_file, 'r', encoding='utf-8') as f:
|
| 164 |
+
return sum(1 for _ in f) - 1 # Subtract header row
|
| 165 |
+
except Exception as e:
|
| 166 |
+
logger.error(f"Error counting rejections: {str(e)}")
|
| 167 |
+
return 0
|
| 168 |
+
|
| 169 |
+
def get_rejection_stats(self) -> Dict[str, Any]:
|
| 170 |
+
"""
|
| 171 |
+
Get statistics about rejections.
|
| 172 |
+
|
| 173 |
+
Returns:
|
| 174 |
+
Dictionary with rejection statistics
|
| 175 |
+
"""
|
| 176 |
+
try:
|
| 177 |
+
stats = {
|
| 178 |
+
"total_rejections": 0,
|
| 179 |
+
"rule_based_rejections": 0,
|
| 180 |
+
"llm_based_rejections": 0,
|
| 181 |
+
"by_attempt": {1: 0, 2: 0, 3: 0},
|
| 182 |
+
"common_reasons": {}
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
with open(self.log_file, 'r', encoding='utf-8') as f:
|
| 186 |
+
reader = csv.DictReader(f)
|
| 187 |
+
for row in reader:
|
| 188 |
+
stats["total_rejections"] += 1
|
| 189 |
+
|
| 190 |
+
# Count by validation type
|
| 191 |
+
if row["validation_type"] == "rule_based":
|
| 192 |
+
stats["rule_based_rejections"] += 1
|
| 193 |
+
elif row["validation_type"] == "llm_based":
|
| 194 |
+
stats["llm_based_rejections"] += 1
|
| 195 |
+
|
| 196 |
+
# Count by attempt
|
| 197 |
+
attempt = int(row["attempt_number"])
|
| 198 |
+
if attempt in stats["by_attempt"]:
|
| 199 |
+
stats["by_attempt"][attempt] += 1
|
| 200 |
+
|
| 201 |
+
# Count common reasons
|
| 202 |
+
reason = row["rejection_reason"]
|
| 203 |
+
stats["common_reasons"][reason] = stats["common_reasons"].get(reason, 0) + 1
|
| 204 |
+
|
| 205 |
+
return stats
|
| 206 |
+
|
| 207 |
+
except Exception as e:
|
| 208 |
+
logger.error(f"Error getting rejection stats: {str(e)}")
|
| 209 |
+
return {}
|
ai_messaging_system_v2/Messaging_system/agents/security_agent.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Security Agent
|
| 3 |
+
|
| 4 |
+
Validates generated messages using rule-based and LLM-based checks.
|
| 5 |
+
Acts as a firewall to ensure messages meet quality standards and expectations.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import json
|
| 9 |
+
import re
|
| 10 |
+
from typing import Dict, Any, Optional, List, Tuple
|
| 11 |
+
from .base_agent import BaseAgent
|
| 12 |
+
from ..LLM import LLM
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class SecurityAgent(BaseAgent):
    """
    Security Agent - Validates messages with rule-based and LLM-based checks.

    This agent:
    - Performs fast rule-based validation first (length limits, banned phrases,
      unreplaced template placeholders, brand-specific wording)
    - Runs LLM-based validation for instruction adherence, content accuracy,
      and authenticity (currently disabled in execute() for UI speed)
    - Provides detailed feedback for regeneration
    - Can approve or reject messages
    """

    def __init__(self, core_config: Any):
        """
        Initialize the Security Agent.

        Args:
            core_config: CoreConfig instance
        """
        super().__init__(name="SecurityAgent", core_config=core_config)
        self.llm = LLM(core_config)

    def execute(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute message validation.

        Args:
            context: {
                "header": str,
                "message": str,
                "user": pd.Series,
                "prompt": str,
                "attempt": int
            }

        Returns:
            {
                "success": bool,  # True = approved, False = rejected
                "data": {
                    "approved": bool,
                    "rejection_reason": Optional[str],
                    "detailed_feedback": Optional[str],
                    "validation_type": str  # "rule_based", "llm_based", "approved" or "error"
                },
                "error": Optional[str]
            }
        """
        # Validate required context
        if not self.validate_context(context, ["header", "message", "user"]):
            return {
                "success": False,
                "data": {
                    "approved": False,
                    "rejection_reason": "Missing context",
                    "detailed_feedback": "Required validation context missing",
                    "validation_type": "rule_based"
                },
                "error": "Missing required context keys"
            }

        header = context["header"]
        message = context["message"]
        user = context["user"]
        prompt = context.get("prompt", "")
        attempt = context.get("attempt", 1)

        try:
            # Step 1: Rule-based validation (fast)
            rule_result = self._rule_based_validation(header, message)
            if not rule_result["passed"]:
                self.log_info(f"Rule-based validation failed: {rule_result['reason']}")
                return {
                    "success": False,
                    "data": {
                        "approved": False,
                        "rejection_reason": rule_result["reason"],
                        "detailed_feedback": rule_result["feedback"],
                        "validation_type": "rule_based"
                    },
                    "error": None
                }

            # skipping LLM-based validation for now - UI purposes --> be faster
            # Step 2: LLM-based validation (includes similarity check for follow-up messages)
            # llm_result = self._llm_based_validation(header, message, user, prompt)
            # if not llm_result["passed"]:
            #     self.log_info(f"LLM-based validation failed: {llm_result['reason']}")
            #     return {
            #         "success": False,
            #         "data": {
            #             "approved": False,
            #             "rejection_reason": llm_result["reason"],
            #             "detailed_feedback": llm_result["feedback"],
            #             "validation_type": "llm_based"
            #         },
            #         "error": None
            #     }

            # All validations passed
            self.log_info("Message approved")
            return {
                "success": True,
                "data": {
                    "approved": True,
                    "rejection_reason": None,
                    "detailed_feedback": None,
                    "validation_type": "approved"
                },
                "error": None
            }

        except Exception as e:
            self.log_error(f"Error during validation: {str(e)}")
            return {
                "success": False,
                "data": {
                    "approved": False,
                    "rejection_reason": "Validation error",
                    "detailed_feedback": str(e),
                    "validation_type": "error"
                },
                "error": str(e)
            }

    def _rule_based_validation(self, header: str, message: str) -> Dict[str, Any]:
        """
        Perform fast rule-based validation.

        Checks:
        - Empty content
        - Character limits (from config, defaults: header 30, message 110)
        - Banned AI jargon / brand-specific phrases
        - Singeo-specific "instrument" wording
        - Unreplaced template placeholders

        Returns:
            {
                "passed": bool,
                "reason": Optional[str],
                "feedback": Optional[str]
            }
        """
        # Check for empty or None
        if not header or not message:
            return {
                "passed": False,
                "reason": "Empty header or message",
                "feedback": "Both header and message must have content. Please generate non-empty text."
            }

        # Strip whitespace for checks
        header = header.strip()
        message = message.strip()

        # Check character limits
        header_limit = self.core_config.config_file.get("header_limit", 30)
        message_limit = self.core_config.config_file.get("message_limit", 110)

        header_len = len(header)
        message_len = len(message)

        if header_len > header_limit:
            chars_over = header_len - header_limit
            return {
                "passed": False,
                "reason": f"Header exceeds limit ({header_len}/{header_limit} chars)",
                "feedback": f"Header is {chars_over} characters too long. Shorten it to {header_limit} characters while keeping the key message."
            }

        if message_len > message_limit:
            chars_over = message_len - message_limit
            return {
                "passed": False,
                "reason": f"Message exceeds limit ({message_len}/{message_limit} chars)",
                "feedback": f"Message is {chars_over} characters too long. Reduce to {message_limit} characters by removing unnecessary words while preserving personalization and content recommendation."
            }

        # Check for banned AI jargon
        banned_check = self._check_banned_phrases(header, message)
        if not banned_check["passed"]:
            return banned_check

        # Check for Singeo-specific 'your instrument' phrase
        if self.core_config.brand == "singeo":
            instrument_check = self._check_instrument_phrases_singeo(header, message)
            if not instrument_check["passed"]:
                return instrument_check

        # Check for placeholder strings (template variables)
        placeholder_check = self._check_placeholder_strings(header, message)
        if not placeholder_check["passed"]:
            return placeholder_check

        # All rule-based checks passed
        return {
            "passed": True,
            "reason": None,
            "feedback": None
        }

    def _check_banned_phrases(self, header: str, message: str) -> Dict[str, Any]:
        """Check for banned AI jargon and brand-specific phrases (case-insensitive)."""
        combined_text = (header + " " + message).lower()

        # Check AI jargon
        ai_jargon = self.core_config.config_file.get("AI_Jargon", [])
        for phrase in ai_jargon:
            if phrase.lower() in combined_text:
                return {
                    "passed": False,
                    "reason": f"Contains banned phrase: '{phrase}'",
                    "feedback": f"Remove the phrase '{phrase}' and replace it with more natural, conversational language."
                }

        # Check brand-specific banned phrases (config key e.g. "AI_phrases_singeo")
        brand_key = f"AI_phrases_{self.core_config.brand}"
        brand_phrases = self.core_config.config_file.get(brand_key, [])
        for phrase in brand_phrases:
            if phrase.lower() in combined_text:
                return {
                    "passed": False,
                    "reason": f"Contains brand-banned phrase: '{phrase}'",
                    "feedback": f"Remove '{phrase}' and use more authentic, brand-appropriate language."
                }

        return {"passed": True, "reason": None, "feedback": None}

    def _check_instrument_phrases_singeo(self, header: str, message: str) -> Dict[str, Any]:
        """
        Check for 'your instrument' and related phrases specifically for Singeo brand.
        This is an extra safety check beyond the banned phrases list.
        """
        instrument_phrases = [
            "your instrument", "the instrument",
            "practice your instrument", "your singing instrument"
        ]

        combined_text = (header + " " + message).lower()

        for phrase in instrument_phrases:
            if phrase.lower() in combined_text:
                return {
                    "passed": False,
                    "reason": f"Contains inappropriate phrase for Singeo: '{phrase}'",
                    "feedback": f"For Singeo, avoid using '{phrase}'. Singing/vocals are not instruments. Use more natural language like 'practice' or 'continue learning' without referencing instruments."
                }

        return {"passed": True, "reason": None, "feedback": None}

    def _check_placeholder_strings(self, header: str, message: str) -> Dict[str, Any]:
        """
        Check for placeholder/template strings that should have been replaced.
        Common patterns: [user_name], {user_name}, {{variable}}, <placeholder>, etc.
        """
        combined_text = header + " " + message

        # Pattern to match common placeholder formats
        placeholder_patterns = [
            r'\[[\w_]+\]',      # [user_name], [first_name], [content_title]
            r'\{[\w_]+\}',      # {user_name}, {name}
            r'\{\{[\w_]+\}\}',  # {{user_name}}, {{variable}}
            r'<[\w_]+>',        # <user_name>, <placeholder>
        ]

        for pattern in placeholder_patterns:
            matches = re.findall(pattern, combined_text)
            if matches:
                # Get unique placeholders; show at most 3 in the feedback
                unique_placeholders = list(set(matches))
                placeholder_str = ", ".join(f"'{p}'" for p in unique_placeholders[:3])

                return {
                    "passed": False,
                    "reason": f"Contains unreplaced placeholder(s): {placeholder_str}",
                    "feedback": f"Do not use template placeholders like {placeholder_str}. Generate actual content with real names and values. If you don't have specific information, create natural generic text without placeholders."
                }

        return {"passed": True, "reason": None, "feedback": None}

    def _get_last_n_previous_messages(self, user: Any, n: int = 2) -> Optional[str]:
        """
        Extract the last N previous messages from user data.

        Args:
            user: User data with previous_messages field
            n: Number of most recent messages to extract (default: 2)

        Returns:
            Formatted string of last N messages, or None if no previous messages
        """
        previous_messages = user.get("previous_messages", "")

        # Treat None / empty / empty-collection markers as "no history".
        # Previously this compared against a literal {} dict, so the *string*
        # "{}" slipped through as real history.
        if previous_messages is None:
            return None
        previous_messages_str = str(previous_messages).strip()
        if previous_messages_str in ("", "[]", "{}"):
            return None

        # Try to parse structured (JSON list) history and keep the last n entries.
        # Catch only parse-related errors; the previous bare `except:` also
        # swallowed SystemExit/KeyboardInterrupt.
        try:
            if previous_messages_str.startswith('['):
                parsed_messages = json.loads(previous_messages_str)
                if isinstance(parsed_messages, list) and parsed_messages:
                    return json.dumps(parsed_messages[-n:], indent=2)
        except (json.JSONDecodeError, TypeError, ValueError):
            pass

        # Fallback: raw string, truncated to the most recent 500 chars
        if len(previous_messages_str) > 500:
            return previous_messages_str[-500:]
        return previous_messages_str

    def _llm_based_validation(self, header: str, message: str,
                              user: Any, prompt: str) -> Dict[str, Any]:
        """
        Perform LLM-based validation for instruction adherence, content accuracy, and authenticity.

        Fails open: if the LLM call errors or returns nothing, the message is
        approved, since rule-based validation already passed.

        Args:
            header: Generated header
            message: Generated message
            user: User data
            prompt: Original prompt used for generation

        Returns:
            {
                "passed": bool,
                "reason": Optional[str],
                "feedback": Optional[str]
            }
        """
        # Build validation prompt
        validation_prompt = self._build_validation_prompt(header, message, user, prompt)

        # Get validation instructions
        validation_instructions = """
You are a quality assurance expert for push notifications. Your task is to validate whether the generated message meets all requirements.

Analyze the message and return a JSON response with your assessment.

Return format:
{
    "approved": true/false,
    "issues": ["list of any issues found"],
    "feedback": "detailed feedback for improvement (if not approved)"
}
"""

        try:
            # Get LLM validation with validation_response mode
            response = self.llm.get_response(
                prompt=validation_prompt,
                instructions=validation_instructions,
                validation_mode="validation_response"  # Use validation response mode
            )

            if response is None:
                # If LLM fails, default to approval (rule-based already passed)
                self.log_warning("LLM validation failed to return response, defaulting to approval")
                return {"passed": True, "reason": None, "feedback": None}

            # Parse response
            approved = response.get("approved", True)
            issues = response.get("issues", [])
            feedback = response.get("feedback", "")

            if not approved:
                # Combine issues into reason
                reason = "; ".join(issues) if issues else "LLM quality check failed"
                return {
                    "passed": False,
                    "reason": reason,
                    "feedback": feedback if feedback else "Please improve the message quality and adherence to guidelines."
                }

            return {"passed": True, "reason": None, "feedback": None}

        except Exception as e:
            self.log_warning(f"LLM validation error: {str(e)}, defaulting to approval")
            # Default to approval if LLM validation fails
            return {"passed": True, "reason": None, "feedback": None}

    def _build_validation_prompt(self, header: str, message: str,
                                 user: Any, original_prompt: str) -> str:
        """
        Build the validation prompt for LLM-based checking.

        Args:
            header: Generated header
            message: Generated message
            user: User data
            original_prompt: The prompt used to generate the message

        Returns:
            Validation prompt string
        """
        # Extract key context from original prompt
        has_recommendation = bool(user.get("recommendation_info"))

        recommendation_text = ""
        if has_recommendation:
            # str() guards against non-string values (e.g. numeric/NaN cells
            # coming from a DataFrame row), which the previous slice choked on.
            recommendation_text = f"""
**Content Recommendation:**
{str(user.get("recommendation_info", ""))[:300]}...
"""

        # Check for previous messages for similarity validation
        last_2_messages = self._get_last_n_previous_messages(user, n=2)
        similarity_check = ""
        if last_2_messages:
            similarity_check = f"""
**Similarity**: Does it sound too similar to previous messages? (Focus on structure/tone, not just words. "Ready to sing?" vs "Ready to practice?" = TOO SIMILAR)
Previous messages: {last_2_messages}"""

        prompt = f"""Validate this push notification:

**Header:** {header}
**Message:** {message}

**Be LENIENT - only reject OBVIOUS problems.**

**Check:**
- If the name of the artist/instructor is used in the message, is it correct (based on Content Recommendation)?
recommendation_text: {recommendation_text}


- **Time Words**: Does it imply CONTENT is new/recent?
  - FORBIDDEN: "new course", "recent release", "latest content" (content recency)
  - OK: "practice today", "start today" (action timing)
  Only reject if implies content recency, NOT action timing.

- {similarity_check}

**REJECT if:**
- incorrect name of the artist/instructor (if applicable)
- Implies content is new/recent
- Sounds too similar to previous (if applicable)

**Otherwise APPROVE**

Feedback (if rejected): 1-2 sentences on how to fix.
"""

        return prompt.strip()
|
ai_messaging_system_v2/README.md
ADDED
|
@@ -0,0 +1,489 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Intelligent Music Education Messaging Platform
|
| 2 |
+
|
| 3 |
+
An AI-powered messaging platform that generates personalized push notifications for music education platforms based on user engagement patterns, behavior analysis, and content preferences. Built specifically for Musora's music education ecosystem (Singeo, Pianote, Guitareo, Drumeo).
|
| 4 |
+
|
| 5 |
+
## 🎯 Overview
|
| 6 |
+
|
| 7 |
+
This project automatically generates contextually relevant, personalized push notification messages for users eligible for specific campaign stages. The system analyzes user behavior, engagement patterns, learning preferences, and interaction history to create tailored re-engagement campaigns that drive meaningful platform interaction.
|
| 8 |
+
High-level non-technical documentation: https://docs.google.com/document/d/1ifhSrwhU-RN9YpSW84bfHYTs5MyfeLJ5ONgmBDuZ5_I/edit?tab=t.0
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
## 🏗️ System Architecture
|
| 12 |
+
|
| 13 |
+
### Core Pipeline Flow
|
| 14 |
+
|
| 15 |
+
1. **Input Configuration** - Configure message parameters and test mode settings
|
| 16 |
+
2. **User Eligibility** - Fetch eligible users from Snowflake or test CSV files
|
| 17 |
+
3. **Data Collection** - Gather user profiles, interaction history, and content data
|
| 18 |
+
4. **Content Recommendation** - Select personalized content recommendations using AI or default routing
|
| 19 |
+
5. **Prompt Generation** - Create context-aware prompts incorporating user data and recommendations
|
| 20 |
+
6. **Message Generation** - Generate personalized messages using multiple LLM providers
|
| 21 |
+
7. **Post-Processing** - Structure output and validate message format
|
| 22 |
+
8. **Storage & Analytics** - Store results and cost analysis to Snowflake (production mode)
|
| 23 |
+
|
| 24 |
+
## 📁 Project Structure
|
| 25 |
+
|
| 26 |
+
```
|
| 27 |
+
ai_messaging_system_v2/
|
| 28 |
+
├── generate_message_parallel.py # Main pipeline orchestrator with parallel processing
|
| 29 |
+
├── configs/ # Modular configuration system
|
| 30 |
+
│ ├── config_loader.py # Main config loader with helper functions
|
| 31 |
+
│ ├── system/
|
| 32 |
+
│ │ └── system_config.py # System-wide settings
|
| 33 |
+
│ ├── singeo/
|
| 34 |
+
│ │ └── campaigns.py # Singeo brand campaigns
|
| 35 |
+
│ ├── drumeo/
|
| 36 |
+
│ │ └── campaigns.py # Drumeo brand campaigns
|
| 37 |
+
│ ├── guitareo/
|
| 38 |
+
│ │ └── campaigns.py # Guitareo brand campaigns
|
| 39 |
+
│ ├── pianote/
|
| 40 |
+
│ │ └── campaigns.py # Pianote brand campaigns
|
| 41 |
+
│ ├── test_data/
|
| 42 |
+
│ │ └── test_config.py # Test campaign data
|
| 43 |
+
│ └── README.md # Configuration documentation
|
| 44 |
+
├── Messaging_system/ # Core messaging modules
|
| 45 |
+
│ ├── Permes.py # Main orchestration class
|
| 46 |
+
│ ├── CoreConfig.py # Configuration and state management
|
| 47 |
+
│ ├── DataCollector.py # Data fetching and user profile creation
|
| 48 |
+
│ ├── SnowFlakeConnection.py # Database operations and queries
|
| 49 |
+
│ ├── PromptGenerator.py # AI prompt creation and personalization
|
| 50 |
+
│ ├── Message_generator.py # LLM-based message generation
|
| 51 |
+
│ ├── LLM.py # Multi-provider LLM interface (OpenAI, Google)
|
| 52 |
+
│ ├── LLMR.py # AI-powered content recommender
|
| 53 |
+
│ └── Homepage_Recommender.py # Default recommendation fallback
|
| 54 |
+
└── Data/ # Test data
|
| 55 |
+
├── test_camp.json # Test campaign configuration
|
| 56 |
+
└── test_staff.csv # Test user data
|
| 57 |
+
```
|
| 58 |
+
|
| 59 |
+
## 🔧 Core Components
|
| 60 |
+
|
| 61 |
+
### 1. Permes (Main Orchestrator)
|
| 62 |
+
The central class that coordinates the entire message generation pipeline:
|
| 63 |
+
- **Purpose**: Orchestrates data collection, recommendation, prompt generation, and message creation
|
| 64 |
+
- **Key Methods**:
|
| 65 |
+
- `create_personalize_messages()`: Main entry point for message generation
|
| 66 |
+
- `_create_personalized_message()`: Handles first-time user messages
|
| 67 |
+
- `_create_followup_personalized_message()`: Manages follow-up campaign messages
|
| 68 |
+
- **Features**: Cost calculation, token tracking, Snowflake integration
|
| 69 |
+
|
| 70 |
+
### 2. CoreConfig (Configuration Manager)
|
| 71 |
+
Manages system configuration and state throughout the pipeline:
|
| 72 |
+
- **Purpose**: Centralized configuration and state management
|
| 73 |
+
- **Key Features**:
|
| 74 |
+
- LLM model configuration (OpenAI, Google)
|
| 75 |
+
- Brand-specific settings (Drumeo, Pianote, Guitareo, Singeo)
|
| 76 |
+
- Token usage tracking and rate limiting
|
| 77 |
+
- Personalization settings
|
| 78 |
+
|
| 79 |
+
### 3. DataCollector (Data Aggregation)
|
| 80 |
+
Handles user data collection and profile creation:
|
| 81 |
+
- **Purpose**: Fetch and prepare user data from multiple sources
|
| 82 |
+
- **Key Features**:
|
| 83 |
+
- User ID extraction and validation
|
| 84 |
+
- Multi-source data merging (users, interactions, recommendations)
|
| 85 |
+
- Birthday reminder calculation
|
| 86 |
+
- Historical message data for follow-ups
|
| 87 |
+
|
| 88 |
+
### 4. SnowFlakeConnection (Database Interface)
|
| 89 |
+
Manages all database operations and queries:
|
| 90 |
+
- **Purpose**: Interface with Snowflake data warehouse
|
| 91 |
+
- **Key Features**:
|
| 92 |
+
- User eligibility queries with stage progression logic
|
| 93 |
+
- Content and interaction data retrieval
|
| 94 |
+
- Message storage and cost tracking
|
| 95 |
+
- Campaign management queries
|
| 96 |
+
|
| 97 |
+
### 5. PromptGenerator (AI Prompt Creation)
|
| 98 |
+
Creates personalized prompts for LLM message generation:
|
| 99 |
+
- **Purpose**: Generate context-aware prompts incorporating user data
|
| 100 |
+
- **Key Features**:
|
| 101 |
+
- User profile integration
|
| 102 |
+
- Content recommendation instructions
|
| 103 |
+
- Campaign-wide instruction injection
|
| 104 |
+
- Per-message (stage-specific) instruction injection
|
| 105 |
+
- Follow-up message context
|
| 106 |
+
- Brand voice integration
|
| 107 |
+
|
| 108 |
+
### 6. Message_generator (LLM Interface)
|
| 109 |
+
Handles LLM communication and message generation:
|
| 110 |
+
- **Purpose**: Generate personalized messages using AI models
|
| 111 |
+
- **Key Features**:
|
| 112 |
+
- Multi-provider LLM support
|
| 113 |
+
- JSON response parsing and validation
|
| 114 |
+
- Output structure management
|
| 115 |
+
- Character limit enforcement
|
| 116 |
+
|
| 117 |
+
### 7. LLM (Multi-Provider Support)
|
| 118 |
+
Provides unified interface to multiple LLM providers:
|
| 119 |
+
- **Purpose**: Abstract LLM provider differences
|
| 120 |
+
- **Supported Providers**: OpenAI (GPT models), Google (Gemini models)
|
| 121 |
+
- **Key Features**:
|
| 122 |
+
- Automatic fallback between providers
|
| 123 |
+
- Retry logic and error handling
|
| 124 |
+
- Token usage tracking
|
| 125 |
+
- Response validation
|
| 126 |
+
|
| 127 |
+
### 8. LLMR (AI Content Recommender)
|
| 128 |
+
AI-powered content recommendation system:
|
| 129 |
+
- **Purpose**: Select optimal content for user recommendations
|
| 130 |
+
- **Features**:
|
| 131 |
+
- User profile-based AI recommendations
|
| 132 |
+
- Specific content mode (force same content for all users)
|
| 133 |
+
- Random selection from top choices
|
| 134 |
+
- Content filtering and validation
|
| 135 |
+
- Integration with recommendation systems
|
| 136 |
+
|
| 137 |
+
### 9. Homepage_Recommender (Default Fallback)
|
| 138 |
+
Provides default recommendation when AI selection fails:
|
| 139 |
+
- **Purpose**: Ensure users always receive a recommendation
|
| 140 |
+
- **Feature**: Routes users to personalized "For You" section
|
| 141 |
+
|
| 142 |
+
## 🚀 Getting Started
|
| 143 |
+
|
| 144 |
+
### Prerequisites
|
| 145 |
+
|
| 146 |
+
- Python 3.8+
|
| 147 |
+
- Snowflake account and credentials
|
| 148 |
+
- OpenAI API key
|
| 149 |
+
- Google AI API key (optional, for Gemini models)
|
| 150 |
+
|
| 151 |
+
### Required Dependencies
|
| 152 |
+
|
| 153 |
+
```bash
|
| 154 |
+
pip install pandas snowflake-snowpark-python openai google-genai python-dotenv tqdm
|
| 155 |
+
```
|
| 156 |
+
|
| 157 |
+
### Environment Variables
|
| 158 |
+
|
| 159 |
+
Create a `.env` file with the following variables:
|
| 160 |
+
|
| 161 |
+
```env
|
| 162 |
+
# Snowflake Configuration
|
| 163 |
+
SNOWFLAKE_USER=your_snowflake_user
|
| 164 |
+
SNOWFLAKE_PASSWORD=your_password
|
| 165 |
+
SNOWFLAKE_ACCOUNT=your_account
|
| 166 |
+
SNOWFLAKE_ROLE=your_role
|
| 167 |
+
SNOWFLAKE_DATABASE=your_database
|
| 168 |
+
SNOWFLAKE_WAREHOUSE=your_warehouse
|
| 169 |
+
SNOWFLAKE_SCHEMA=your_schema
|
| 170 |
+
|
| 171 |
+
# AI Provider Keys
|
| 172 |
+
OPENAI_API_KEY=your_openai_key
|
| 173 |
+
GOOGLE_API_KEY=your_google_key
|
| 174 |
+
```
|
| 175 |
+
|
| 176 |
+
### Basic Usage
|
| 177 |
+
|
| 178 |
+
1. **Configure System**: Edit configuration files in `configs/` directory:
|
| 179 |
+
- `configs/system/system_config.py` - System-wide settings
|
| 180 |
+
- `configs/{brand}/campaigns.py` - Brand-specific campaigns
|
| 181 |
+
- See `configs/README.md` for detailed configuration guide
|
| 182 |
+
|
| 183 |
+
2. **Set Parameters**: Modify `generate_message_parallel.py` main section:
|
| 184 |
+
|
| 185 |
+
```python
|
| 186 |
+
if __name__ == "__main__":
|
| 187 |
+
# Input parameters
|
| 188 |
+
message_numbers = range(1, 2) # Stage number(s) to process
|
| 189 |
+
test_mode = False # If True, uses test campaign config
|
| 190 |
+
run_for_all_messages = False # If True, runs for all 11 stages
|
| 191 |
+
brand = "drumeo" # Brand: "singeo", "guitareo", "pianote", "drumeo"
|
| 192 |
+
campaign_type = "re_engagement" # Campaign type
|
| 193 |
+
|
| 194 |
+
# Parallel processing parameters
|
| 195 |
+
chunk_size = 1000 # Maximum chunk size (actual size determined dynamically)
|
| 196 |
+
max_workers = 5 # Number of parallel workers (None = auto-detect)
|
| 197 |
+
|
| 198 |
+
# Generate messages
|
| 199 |
+
for message_number in message_numbers:
|
| 200 |
+
results = generate_messages_parallel(
|
| 201 |
+
message_number=message_number,
|
| 202 |
+
test_mode=test_mode,
|
| 203 |
+
run_for_all_messages=run_for_all_messages,
|
| 204 |
+
brand=brand,
|
| 205 |
+
campaign_type=campaign_type,
|
| 206 |
+
chunk_size=chunk_size,
|
| 207 |
+
max_workers=max_workers
|
| 208 |
+
)
|
| 209 |
+
```
|
| 210 |
+
|
| 211 |
+
3. **Run Pipeline**:
|
| 212 |
+
```bash
|
| 213 |
+
python generate_message_parallel.py
|
| 214 |
+
```
|
| 215 |
+
|
| 216 |
+
### Test Mode
|
| 217 |
+
|
| 218 |
+
Enable test mode to use embedded test data instead of Snowflake:
|
| 219 |
+
|
| 220 |
+
```python
|
| 221 |
+
test_mode = True # Uses TEST_STAFF_DATA from configs/test_data/test_config.py
|
| 222 |
+
```
|
| 223 |
+
|
| 224 |
+
## 🎵 Brand Support
|
| 225 |
+
|
| 226 |
+
The platform supports all Musora education brands:
|
| 227 |
+
|
| 228 |
+
| Brand | Instrument | Emoji | Base URL |
|
| 229 |
+
|----------|------------|-------|----------|
|
| 230 |
+
| Drumeo | Drums | 🥁 | drumeo |
|
| 231 |
+
| Pianote | Piano | 🎹 | pianote |
|
| 232 |
+
| Guitareo | Guitar | 🎸 | guitareo |
|
| 233 |
+
| Singeo | Singing | 🎤 | singeo |
|
| 234 |
+
|
| 235 |
+
## 🤖 AI Model Support
|
| 236 |
+
|
| 237 |
+
### OpenAI Models
|
| 238 |
+
- GPT-4o-mini
|
| 239 |
+
- GPT-4.1-mini
|
| 240 |
+
- GPT-5-mini
|
| 241 |
+
- GPT-5-nano (default fallback)
|
| 242 |
+
|
| 243 |
+
### Google Models
|
| 244 |
+
- Gemini-2.5-flash
|
| 245 |
+
- Gemini-2.0-flash
|
| 246 |
+
- Gemini-2.5-flash-lite
|
| 247 |
+
|
| 248 |
+
### Model Selection Strategy
|
| 249 |
+
- Primary model attempts with retries
|
| 250 |
+
- Automatic fallback to alternative models
|
| 251 |
+
- Cost optimization through model pricing tiers
|
| 252 |
+
- Failure threshold-based model switching
|
| 253 |
+
|
| 254 |
+
## 📊 Features
|
| 255 |
+
|
| 256 |
+
### Personalization
|
| 257 |
+
- User profile integration
|
| 258 |
+
- Learning history analysis
|
| 259 |
+
- Engagement pattern recognition
|
| 260 |
+
- Content preference matching
|
| 261 |
+
- Birthday reminders
|
| 262 |
+
- Campaign-wide instructions for consistent messaging
|
| 263 |
+
- Stage-specific instructions for targeted guidance
|
| 264 |
+
|
| 265 |
+
### Multi-Stage Campaigns
|
| 266 |
+
- 11-stage re-engagement sequences
|
| 267 |
+
- Previous message context awareness
|
| 268 |
+
- Stage progression logic
|
| 269 |
+
- Cooldown period management
|
| 270 |
+
|
| 271 |
+
### Content Recommendations
|
| 272 |
+
- AI-powered content selection
|
| 273 |
+
- Specific content promotion (force same content for all users)
|
| 274 |
+
- Popular content fallback
|
| 275 |
+
- Brand-appropriate filtering
|
| 276 |
+
- Content type diversity (courses, workouts, songs, quick tips)
|
| 277 |
+
|
| 278 |
+
### Quality Assurance
|
| 279 |
+
- Character limit enforcement
|
| 280 |
+
- JSON validation
|
| 281 |
+
- Content filtering (banned content)
|
| 282 |
+
- Message uniqueness verification
|
| 283 |
+
|
| 284 |
+
### Analytics & Monitoring
|
| 285 |
+
- Token usage tracking
|
| 286 |
+
- Cost calculation per campaign
|
| 287 |
+
- Performance metrics
|
| 288 |
+
- Error logging and handling
|
| 289 |
+
|
| 290 |
+
## 🔄 Campaign Flow
|
| 291 |
+
|
| 292 |
+
1. **Stage 1**: Initial engagement message with personalized content
|
| 293 |
+
2. **Stages 2-11**: Follow-up messages with:
|
| 294 |
+
- Context from previous messages
|
| 295 |
+
- Fresh content recommendations
|
| 296 |
+
- Varied messaging approaches
|
| 297 |
+
- Escalating engagement tactics
|
| 298 |
+
|
| 299 |
+
## 💰 Cost Management
|
| 300 |
+
|
| 301 |
+
- Real-time token usage tracking
|
| 302 |
+
- Per-message cost calculation
|
| 303 |
+
- Model pricing optimization
|
| 304 |
+
- Rate limiting and throttling
|
| 305 |
+
- Batch processing efficiency
|
| 306 |
+
|
| 307 |
+
## 🛡️ Error Handling
|
| 308 |
+
|
| 309 |
+
- Multi-level retry logic
|
| 310 |
+
- Graceful fallback mechanisms
|
| 311 |
+
- Comprehensive logging
|
| 312 |
+
- Data validation at each stage
|
| 313 |
+
- User-friendly error messages
|
| 314 |
+
|
| 315 |
+
## 📈 Performance Optimization
|
| 316 |
+
|
| 317 |
+
### Parallel Processing Architecture
|
| 318 |
+
- **Multiprocessing**: Uses Python's multiprocessing.Pool for true parallel execution
|
| 319 |
+
- **Dynamic Chunking**: Intelligently distributes users among workers for maximum utilization
|
| 320 |
+
- Minimum chunk size: 20 users
|
| 321 |
+
- Maximum chunk size: 1000 users
|
| 322 |
+
- Equal distribution across available workers
|
| 323 |
+
- Example: 1000 users with 5 workers → 5 chunks of 200 (uses all workers)
|
| 324 |
+
- **Independent Workers**: Each worker process creates its own Snowflake session for thread safety
|
| 325 |
+
- **Concurrent Chunk Processing**: Processes multiple chunks simultaneously using CPU cores
|
| 326 |
+
- **Scalable Workers**: Auto-detects CPU count or accepts custom worker count (capped at 8 by default)
|
| 327 |
+
|
| 328 |
+
### Additional Optimizations
|
| 329 |
+
- Efficient database queries with indexed lookups
|
| 330 |
+
- Direct Snowflake writes within each worker process
|
| 331 |
+
- Memory-efficient chunk processing
|
| 332 |
+
- Real-time progress tracking and logging
|
| 333 |
+
- Automatic result aggregation and summary statistics
|
| 334 |
+
|
| 335 |
+
## 🧪 Testing
|
| 336 |
+
|
| 337 |
+
The platform includes comprehensive testing capabilities:
|
| 338 |
+
|
| 339 |
+
- Test mode with local CSV files
|
| 340 |
+
- Mock data generation
|
| 341 |
+
- Campaign simulation
|
| 342 |
+
- Performance benchmarking
|
| 343 |
+
- Cost estimation
|
| 344 |
+
|
| 345 |
+
## 📝 Configuration
|
| 346 |
+
|
| 347 |
+
The system uses a modular configuration structure organized by purpose and brand. All configurations are located in the `configs/` directory.
|
| 348 |
+
|
| 349 |
+
### System Configuration (`configs/system/system_config.py`)
|
| 350 |
+
|
| 351 |
+
Contains global system settings:
|
| 352 |
+
|
| 353 |
+
```python
|
| 354 |
+
SYSTEM_CONFIG = {
|
| 355 |
+
"user_info_features": ["first_name", "country", "instrument", ...],
|
| 356 |
+
"header_limit": 30,
|
| 357 |
+
"message_limit": 110,
|
| 358 |
+
"openai_models": ["gpt-4o-mini", "gpt-5-nano", ...],
|
| 359 |
+
"google_models": ["gemini-2.5-flash-lite", "gemini-2.5-flash", ...],
|
| 360 |
+
"model_failure_threshold": 3,
|
| 361 |
+
"banned_contents": [373883, 358813, ...],
|
| 362 |
+
"AI_Jargon": ["elevate", "enhance", "ignite", ...]
|
| 363 |
+
}
|
| 364 |
+
```
|
| 365 |
+
|
| 366 |
+
### Campaign Configuration (`configs/{brand}/campaigns.py`)
|
| 367 |
+
|
| 368 |
+
Each brand has its own configuration file with campaign-specific settings:
|
| 369 |
+
|
| 370 |
+
```python
|
| 371 |
+
CAMPAIGNS = {
|
| 372 |
+
"re_engagement": {
|
| 373 |
+
"campaign_view": "drumeo_re_engagement",
|
| 374 |
+
"campaign_name": "Drumeo - Inactive Members (for 3 days) - Re-engagement",
|
| 375 |
+
"brand": "drumeo",
|
| 376 |
+
|
| 377 |
+
# Campaign-wide instructions (optional)
|
| 378 |
+
"campaign_instructions": "Keep messages encouraging and upbeat",
|
| 379 |
+
|
| 380 |
+
"1": { # Stage 1 configuration
|
| 381 |
+
"stage": 1,
|
| 382 |
+
"segment_info": "Students who haven't practiced...",
|
| 383 |
+
"recsys_contents": ["workout", "course", "quick_tips"],
|
| 384 |
+
"model": "gemini-2.5-flash-lite",
|
| 385 |
+
|
| 386 |
+
# Stage-specific instructions (optional)
|
| 387 |
+
"instructions": "Focus on the recommended content",
|
| 388 |
+
|
| 389 |
+
# Specific content promotion (optional)
|
| 390 |
+
"specific_content_id": None, # Set to content_id to force specific content for all users
|
| 391 |
+
...
|
| 392 |
+
},
|
| 393 |
+
# Stages 2-11...
|
| 394 |
+
}
|
| 395 |
+
}
|
| 396 |
+
```
|
| 397 |
+
|
| 398 |
+
### Configuration Features
|
| 399 |
+
|
| 400 |
+
#### Campaign-Wide Instructions
|
| 401 |
+
Apply instructions to all stages of a campaign:
|
| 402 |
+
```python
|
| 403 |
+
"campaign_instructions": "Keep the tone friendly and encouraging"
|
| 404 |
+
```
|
| 405 |
+
|
| 406 |
+
#### Per-Message Instructions
|
| 407 |
+
Add stage-specific guidance:
|
| 408 |
+
```python
|
| 409 |
+
"1": {
|
| 410 |
+
"stage": 1,
|
| 411 |
+
"instructions": "Emphasize the recommended content",
|
| 412 |
+
...
|
| 413 |
+
}
|
| 414 |
+
```
|
| 415 |
+
|
| 416 |
+
#### Specific Content Promotion
|
| 417 |
+
Force a specific content for all users in a stage (e.g., for marketing campaigns):
|
| 418 |
+
```python
|
| 419 |
+
"2": {
|
| 420 |
+
"stage": 2,
|
| 421 |
+
"involve_recsys_result": True,
|
| 422 |
+
"specific_content_id": 12345, # All users get this content
|
| 423 |
+
"instructions": "Promote this featured content",
|
| 424 |
+
...
|
| 425 |
+
}
|
| 426 |
+
```
|
| 427 |
+
|
| 428 |
+
When `specific_content_id` is set:
|
| 429 |
+
- Overrides AI recommendations completely
|
| 430 |
+
- All users receive the same content at that stage
|
| 431 |
+
- No LLM tokens used for content selection (cost savings)
|
| 432 |
+
- System validates content exists before generating messages
|
| 433 |
+
- Set to `None` (default) for regular personalized recommendations
|
| 434 |
+
|
| 435 |
+
Both instruction types are automatically injected into LLM prompts after the user profile section, providing contextual guidance for message generation.
|
| 436 |
+
|
| 437 |
+
### Loading Configurations
|
| 438 |
+
|
| 439 |
+
```python
|
| 440 |
+
from configs.config_loader import get_system_config, get_campaign_config
|
| 441 |
+
|
| 442 |
+
# Load system configuration
|
| 443 |
+
system_config = get_system_config()
|
| 444 |
+
|
| 445 |
+
# Load campaign configuration
|
| 446 |
+
campaign = get_campaign_config("drumeo", "re_engagement", test_mode=False)
|
| 447 |
+
|
| 448 |
+
# Access instructions
|
| 449 |
+
campaign_instructions = campaign.get("campaign_instructions")
|
| 450 |
+
stage_instructions = campaign["1"].get("instructions")
|
| 451 |
+
```
|
| 452 |
+
|
| 453 |
+
### Helper Functions
|
| 454 |
+
|
| 455 |
+
- `get_system_config()`: Returns system-wide settings
|
| 456 |
+
- `get_campaign_config(brand, campaign_type, test_mode)`: Retrieves brand-specific campaign configuration
|
| 457 |
+
- `get_all_brands()`: Returns list of available brands
|
| 458 |
+
- `get_campaign_types(brand)`: Returns available campaign types for a brand
|
| 459 |
+
|
| 460 |
+
When `test_mode=True`, the system automatically uses test campaign names and test staff data.
|
| 461 |
+
|
| 462 |
+
### Adding New Configurations
|
| 463 |
+
|
| 464 |
+
See `configs/README.md` for detailed instructions on:
|
| 465 |
+
- Adding new brands
|
| 466 |
+
- Adding new campaign types
|
| 467 |
+
- Modifying system settings
|
| 468 |
+
- Adding campaign or stage-specific instructions
|
| 469 |
+
|
| 470 |
+
## 🔮 Future Enhancements
|
| 471 |
+
|
| 472 |
+
- A/B testing framework
|
| 473 |
+
- Advanced analytics dashboard
|
| 474 |
+
- Real-time personalization
|
| 475 |
+
- Multi-language support
|
| 476 |
+
- Enhanced recommendation algorithms
|
| 477 |
+
- Integration with additional LLM providers
|
| 478 |
+
|
| 479 |
+
## 📄 License
|
| 480 |
+
|
| 481 |
+
This project is proprietary to Musora Media Inc. and is not licensed for public use.
|
| 482 |
+
|
| 483 |
+
## 🤝 Contributing
|
| 484 |
+
|
| 485 |
+
Reach out to [email protected] for questions or contributions.
|
| 486 |
+
|
| 487 |
+
---
|
| 488 |
+
|
| 489 |
+
**Built with ❤️ for music education by the Musora team**
|
ai_messaging_system_v2/UI_MODE_GUIDE.md
ADDED
|
@@ -0,0 +1,495 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UI Mode Guide
|
| 2 |
+
|
| 3 |
+
This guide explains how to use the **UI Mode** feature of the AI Messaging System. UI Mode allows you to generate personalized messages locally without storing results in Snowflake, making it perfect for UI applications and testing.
|
| 4 |
+
|
| 5 |
+
## Overview
|
| 6 |
+
|
| 7 |
+
UI Mode provides:
|
| 8 |
+
- **Local CSV Storage**: Messages saved to `ai_messaging_system_v2/Data/ui_output/messages.csv`
|
| 9 |
+
- **Multi-Stage Support**: Single CSV file that grows with each stage (like Snowflake)
|
| 10 |
+
- **Full Control**: Customize all campaign parameters dynamically
|
| 11 |
+
- **Emoji Support**: UTF-8 encoding ensures proper emoji display
|
| 12 |
+
- **No Snowflake Writes**: Everything stays local for UI visualization
|
| 13 |
+
|
| 14 |
+
## Key Differences from Production/Test Modes
|
| 15 |
+
|
| 16 |
+
| Feature | Production/Test Mode | UI Mode |
|
| 17 |
+
|---------|---------------------|---------|
|
| 18 |
+
| Data Storage | Snowflake database | Local CSV files |
|
| 19 |
+
| Previous Stage Data | Read from Snowflake | Read from local CSV |
|
| 20 |
+
| Output Location | `MESSAGING_SYSTEM_V2.GENERATED_DATA` | `Data/ui_output/messages.csv` |
|
| 21 |
+
| Use Case | Production campaigns | UI testing & development |
|
| 22 |
+
|
| 23 |
+
## Quick Start
|
| 24 |
+
|
| 25 |
+
### 1. Test UI Mode with generate_message_parallel.py
|
| 26 |
+
|
| 27 |
+
For testing purposes before UI integration:
|
| 28 |
+
|
| 29 |
+
```python
|
| 30 |
+
from ai_messaging_system_v2.generate_message_parallel import generate_messages_parallel
|
| 31 |
+
from ai_messaging_system_v2.Messaging_system.Permes import Permes
|
| 32 |
+
|
| 33 |
+
# Clear previous UI output
|
| 34 |
+
Permes.clear_ui_output()
|
| 35 |
+
|
| 36 |
+
# Generate messages for stage 1 in UI mode
|
| 37 |
+
results = generate_messages_parallel(
|
| 38 |
+
message_number=1,
|
| 39 |
+
test_mode=False,
|
| 40 |
+
run_for_all_messages=False,
|
| 41 |
+
brand="drumeo",
|
| 42 |
+
campaign_type="re_engagement",
|
| 43 |
+
chunk_size=1000,
|
| 44 |
+
max_workers=5,
|
| 45 |
+
mode="ui" # Enable UI mode
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
# Output will be in: Data/ui_output/messages.csv
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
### 2. Use UI Mode Directly in Your UI Tool
|
| 52 |
+
|
| 53 |
+
When integrating with your UI tool, call `create_personalize_messages` directly:
|
| 54 |
+
|
| 55 |
+
```python
|
| 56 |
+
from ai_messaging_system_v2.Messaging_system.Permes import Permes
|
| 57 |
+
from ai_messaging_system_v2.configs.config_loader import get_system_config
|
| 58 |
+
from snowflake.snowpark import Session
|
| 59 |
+
import pandas as pd
|
| 60 |
+
|
| 61 |
+
# Initialize
|
| 62 |
+
permes = Permes()
|
| 63 |
+
system_config = get_system_config()
|
| 64 |
+
|
| 65 |
+
# Clear previous output (optional, for fresh runs)
|
| 66 |
+
Permes.clear_ui_output()
|
| 67 |
+
|
| 68 |
+
# Prepare your users DataFrame
|
| 69 |
+
users = pd.DataFrame({
|
| 70 |
+
'user_id': [123, 456, 789],
|
| 71 |
+
'email': ['[email protected]', '[email protected]', '[email protected]']
|
| 72 |
+
})
|
| 73 |
+
|
| 74 |
+
# Connect to Snowflake (still needed for user data fetching)
|
| 75 |
+
session = Session.builder.configs(connection_params).create()
|
| 76 |
+
|
| 77 |
+
# Generate messages with custom parameters
|
| 78 |
+
users_with_messages = permes.create_personalize_messages(
|
| 79 |
+
session=session,
|
| 80 |
+
users=users,
|
| 81 |
+
brand="drumeo",
|
| 82 |
+
config_file=system_config,
|
| 83 |
+
platform="push",
|
| 84 |
+
stage=1,
|
| 85 |
+
mode="ui", # Enable UI mode
|
| 86 |
+
|
| 87 |
+
# Customize these parameters from your UI
|
| 88 |
+
recsys_contents=["workout", "course"],
|
| 89 |
+
model="gemini-2.5-flash-lite",
|
| 90 |
+
identifier_column="user_id",
|
| 91 |
+
segment_info="Students who haven't practiced in 3 days",
|
| 92 |
+
sample_example="Header: Get back to practicing!\nMessage: Your drums are waiting for you!",
|
| 93 |
+
involve_recsys_result=True,
|
| 94 |
+
personalization=True,
|
| 95 |
+
campaign_name="My-Custom-Campaign",
|
| 96 |
+
campaign_instructions="Keep messages encouraging and positive",
|
| 97 |
+
per_message_instructions="Focus on the recommended content",
|
| 98 |
+
specific_content_id=None, # Set to content_id to force specific content for all users
|
| 99 |
+
ui_experiment_id=None # Optional: Set for A/B testing (e.g., "messages_a_drumeo_20260111_1756")
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
# Messages are now in: Data/ui_output/messages.csv
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
## Configuration
|
| 106 |
+
|
| 107 |
+
### UI Test Config File
|
| 108 |
+
|
| 109 |
+
Location: `ai_messaging_system_v2/configs/test_data/ui_test_campaigns.py`
|
| 110 |
+
|
| 111 |
+
This file contains a test campaign configuration that you can modify for testing:
|
| 112 |
+
|
| 113 |
+
```python
|
| 114 |
+
UI_TEST_CAMPAIGNS = {
|
| 115 |
+
"re_engagement": {
|
| 116 |
+
"campaign_view": "drumeo_re_engagement",
|
| 117 |
+
"campaign_name": "UI-Test-Campaign-Re-engagement",
|
| 118 |
+
"brand": "drumeo",
|
| 119 |
+
"campaign_instructions": "Keep messages encouraging...",
|
| 120 |
+
|
| 121 |
+
"1": {
|
| 122 |
+
"stage": 1,
|
| 123 |
+
"segment_info": "Students who haven't practiced...",
|
| 124 |
+
"recsys_contents": ["workout", "course", "quick_tips"],
|
| 125 |
+
"involve_recsys_result": True,
|
| 126 |
+
"personalization": True,
|
| 127 |
+
"model": "gemini-2.5-flash-lite",
|
| 128 |
+
"instructions": "Focus on recommended content...",
|
| 129 |
+
"specific_content_id": None # Set to content_id to force specific content
|
| 130 |
+
},
|
| 131 |
+
# Add more stages as needed
|
| 132 |
+
}
|
| 133 |
+
}
|
| 134 |
+
```
|
| 135 |
+
|
| 136 |
+
## Multi-Stage Campaigns
|
| 137 |
+
|
| 138 |
+
UI Mode supports multi-stage campaigns with a single CSV file that grows:
|
| 139 |
+
|
| 140 |
+
```python
|
| 141 |
+
# Stage 1: Initial messages
|
| 142 |
+
Permes.clear_ui_output() # Clear for new campaign
|
| 143 |
+
generate_messages_parallel(message_number=1, mode="ui", ...)
|
| 144 |
+
|
| 145 |
+
# Stage 2: Follow-up messages (reads from stage 1)
|
| 146 |
+
generate_messages_parallel(message_number=2, mode="ui", ...)
|
| 147 |
+
|
| 148 |
+
# Stage 3: Another follow-up (reads from stages 1 & 2)
|
| 149 |
+
generate_messages_parallel(message_number=3, mode="ui", ...)
|
| 150 |
+
|
| 151 |
+
# All stages are in the same CSV file with a "stage" column
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
## A/B Testing Support
|
| 155 |
+
|
| 156 |
+
UI Mode fully supports A/B testing with separate file tracking for each experiment:
|
| 157 |
+
|
| 158 |
+
```python
|
| 159 |
+
from ai_messaging_system_v2.Messaging_system.Permes import Permes
|
| 160 |
+
import pandas as pd
|
| 161 |
+
|
| 162 |
+
permes = Permes()
|
| 163 |
+
system_config = get_system_config()
|
| 164 |
+
|
| 165 |
+
# Shared configuration
|
| 166 |
+
users = pd.DataFrame({'user_id': [123, 456, 789]})
|
| 167 |
+
brand = "drumeo"
|
| 168 |
+
campaign_name = "AB-Test-Campaign"
|
| 169 |
+
session = Session.builder.configs(connection_params).create()
|
| 170 |
+
|
| 171 |
+
# Generate Experiment A (Stage 1)
|
| 172 |
+
users_a = permes.create_personalize_messages(
|
| 173 |
+
session=session,
|
| 174 |
+
users=users,
|
| 175 |
+
brand=brand,
|
| 176 |
+
config_file=system_config,
|
| 177 |
+
stage=1,
|
| 178 |
+
mode="ui",
|
| 179 |
+
campaign_name=campaign_name,
|
| 180 |
+
model="gemini-2.5-flash-lite",
|
| 181 |
+
ui_experiment_id="messages_a_drumeo_20260111_1756" # Unique ID for experiment A
|
| 182 |
+
)
|
| 183 |
+
# Saves to: Data/ui_output/messages_a_drumeo_20260111_1756.csv
|
| 184 |
+
|
| 185 |
+
# Generate Experiment B (Stage 1)
|
| 186 |
+
users_b = permes.create_personalize_messages(
|
| 187 |
+
session=session,
|
| 188 |
+
users=users,
|
| 189 |
+
brand=brand,
|
| 190 |
+
config_file=system_config,
|
| 191 |
+
stage=1,
|
| 192 |
+
mode="ui",
|
| 193 |
+
campaign_name=campaign_name,
|
| 194 |
+
model="gpt-4o-mini", # Different model for B
|
| 195 |
+
ui_experiment_id="messages_b_drumeo_20260111_1756" # Unique ID for experiment B
|
| 196 |
+
)
|
| 197 |
+
# Saves to: Data/ui_output/messages_b_drumeo_20260111_1756.csv
|
| 198 |
+
|
| 199 |
+
# Generate Follow-up Stage 2 for Experiment A
|
| 200 |
+
users_a_stage2 = permes.create_personalize_messages(
|
| 201 |
+
session=session,
|
| 202 |
+
users=users,
|
| 203 |
+
brand=brand,
|
| 204 |
+
config_file=system_config,
|
| 205 |
+
stage=2,
|
| 206 |
+
mode="ui",
|
| 207 |
+
campaign_name=campaign_name,
|
| 208 |
+
ui_experiment_id="messages_a_drumeo_20260111_1756" # Same ID to append to A's file
|
| 209 |
+
)
|
| 210 |
+
# Appends to: Data/ui_output/messages_a_drumeo_20260111_1756.csv
|
| 211 |
+
|
| 212 |
+
# Generate Follow-up Stage 2 for Experiment B
|
| 213 |
+
users_b_stage2 = permes.create_personalize_messages(
|
| 214 |
+
session=session,
|
| 215 |
+
users=users,
|
| 216 |
+
brand=brand,
|
| 217 |
+
config_file=system_config,
|
| 218 |
+
stage=2,
|
| 219 |
+
mode="ui",
|
| 220 |
+
campaign_name=campaign_name,
|
| 221 |
+
ui_experiment_id="messages_b_drumeo_20260111_1756" # Same ID to append to B's file
|
| 222 |
+
)
|
| 223 |
+
# Appends to: Data/ui_output/messages_b_drumeo_20260111_1756.csv
|
| 224 |
+
```
|
| 225 |
+
|
| 226 |
+
**Key Points for A/B Testing:**
|
| 227 |
+
- Use `ui_experiment_id` parameter to specify unique filenames for each experiment
|
| 228 |
+
- Use the same `ui_experiment_id` across all stages of the same experiment
|
| 229 |
+
- Each experiment gets its own CSV file that grows with each stage
|
| 230 |
+
- Without `ui_experiment_id`, defaults to `messages.csv`
|
| 231 |
+
|
| 232 |
+
## Output Format
|
| 233 |
+
|
| 234 |
+
### messages.csv
|
| 235 |
+
|
| 236 |
+
All columns from the Snowflake schema, including:
|
| 237 |
+
- `user_id`, `email`, `first_name`
|
| 238 |
+
- `message`, `header` (contains JSON with header/message)
|
| 239 |
+
- `recommendation`, `recommendation_info`
|
| 240 |
+
- `stage`, `campaign_name`, `timestamp`
|
| 241 |
+
- `brand`, `platform`, `permission`
|
| 242 |
+
- All user profile fields
|
| 243 |
+
|
| 244 |
+
Example:
|
| 245 |
+
```csv
|
| 246 |
+
user_id,email,first_name,message,recommendation,stage,campaign_name,...
|
| 247 |
+
123,[email protected],John,"{""1"":{""header"":""Hi John 👋"",...}}",workout_123,1,UI-Test-Campaign,...
|
| 248 |
+
```
|
| 249 |
+
|
| 250 |
+
### message_cost.csv
|
| 251 |
+
|
| 252 |
+
Cost tracking information:
|
| 253 |
+
- `brand`, `campaign_name`, `stage`
|
| 254 |
+
- `model`, `number_of_messages`
|
| 255 |
+
- `total_prompt_tokens`, `total_completion_tokens`
|
| 256 |
+
- `total_cost`, `timestamp`
|
| 257 |
+
|
| 258 |
+
## Helper Functions
|
| 259 |
+
|
| 260 |
+
### Clear UI Output
|
| 261 |
+
|
| 262 |
+
```python
|
| 263 |
+
from ai_messaging_system_v2.Messaging_system.Permes import Permes
|
| 264 |
+
|
| 265 |
+
# Clear all CSV files in UI output directory
|
| 266 |
+
Permes.clear_ui_output()
|
| 267 |
+
```
|
| 268 |
+
|
| 269 |
+
### Get UI Output Path
|
| 270 |
+
|
| 271 |
+
```python
|
| 272 |
+
from ai_messaging_system_v2.Messaging_system.Permes import Permes
|
| 273 |
+
|
| 274 |
+
# Get the path to UI output directory
|
| 275 |
+
output_path = Permes.get_ui_output_path()
|
| 276 |
+
print(f"UI output directory: {output_path}")
|
| 277 |
+
```
|
| 278 |
+
|
| 279 |
+
## Parameters You Can Control from UI
|
| 280 |
+
|
| 281 |
+
When calling `create_personalize_messages` from your UI tool, you have full control over:
|
| 282 |
+
|
| 283 |
+
1. **Campaign Configuration**:
|
| 284 |
+
- `campaign_name`: Custom campaign identifier
|
| 285 |
+
- `campaign_instructions`: Campaign-wide instructions (optional)
|
| 286 |
+
- `per_message_instructions`: Stage-specific instructions (optional)
|
| 287 |
+
|
| 288 |
+
2. **Message Generation**:
|
| 289 |
+
- `stage`: Message number (1, 2, 3, ...)
|
| 290 |
+
- `model`: LLM model to use ("gemini-2.5-flash-lite", "gpt-5-nano", etc.)
|
| 291 |
+
- `sample_example`: Example message format
|
| 292 |
+
- `segment_info`: Description of user segment
|
| 293 |
+
|
| 294 |
+
3. **Personalization**:
|
| 295 |
+
- `personalization`: Enable/disable personalization
|
| 296 |
+
- `involve_recsys_result`: Include content recommendations
|
| 297 |
+
- `recsys_contents`: Types of content to recommend ["workout", "course", "quick_tips", "song"]
|
| 298 |
+
- `specific_content_id`: Force specific content for all users (overrides AI recommendations)
|
| 299 |
+
|
| 300 |
+
4. **Users**:
|
| 301 |
+
- `users`: DataFrame with user IDs or emails
|
| 302 |
+
- `identifier_column`: "user_id" or "email"
|
| 303 |
+
|
| 304 |
+
## Promoting Specific Content
|
| 305 |
+
|
| 306 |
+
You can force all users to receive the same content recommendation using `specific_content_id`:
|
| 307 |
+
|
| 308 |
+
```python
|
| 309 |
+
# Example: Promote a specific workout for all users
|
| 310 |
+
users_with_messages = permes.create_personalize_messages(
|
| 311 |
+
session=session,
|
| 312 |
+
users=users,
|
| 313 |
+
brand="drumeo",
|
| 314 |
+
config_file=system_config,
|
| 315 |
+
stage=2,
|
| 316 |
+
mode="ui",
|
| 317 |
+
|
| 318 |
+
# Force all users to receive workout ID 12345
|
| 319 |
+
involve_recsys_result=True, # Must be True to recommend content
|
| 320 |
+
specific_content_id=12345, # All users get this content
|
| 321 |
+
|
| 322 |
+
# Optional: Customize message to emphasize the featured content
|
| 323 |
+
per_message_instructions="Emphasize this featured workout as a special opportunity",
|
| 324 |
+
|
| 325 |
+
# Other parameters...
|
| 326 |
+
personalization=True,
|
| 327 |
+
campaign_name="Featured-Workout-Campaign",
|
| 328 |
+
)
|
| 329 |
+
```
|
| 330 |
+
|
| 331 |
+
**How it works:**
|
| 332 |
+
- When `specific_content_id` is set, AI recommendations are completely bypassed
|
| 333 |
+
- All users receive the same content, but messages remain personalized around it
|
| 334 |
+
- System validates that the content exists in your brand's database
|
| 335 |
+
- No LLM tokens used for content selection (faster and cheaper)
|
| 336 |
+
- Raises an error if content_id not found (prevents silent failures)
|
| 337 |
+
|
| 338 |
+
**Use cases:**
|
| 339 |
+
- Marketing campaigns for specific courses or workouts
|
| 340 |
+
- Feature new content releases to all users
|
| 341 |
+
- A/B testing specific content performance
|
| 342 |
+
- Seasonal or event-based promotions
|
| 343 |
+
|
| 344 |
+
## Best Practices
|
| 345 |
+
|
| 346 |
+
### 1. Clear Output Between Campaigns
|
| 347 |
+
```python
|
| 348 |
+
# When starting a new campaign with different parameters
|
| 349 |
+
Permes.clear_ui_output()
|
| 350 |
+
```
|
| 351 |
+
|
| 352 |
+
### 2. Check Output Files
|
| 353 |
+
```python
|
| 354 |
+
import pandas as pd
|
| 355 |
+
from ai_messaging_system_v2.Messaging_system.Permes import Permes
|
| 356 |
+
|
| 357 |
+
# Read generated messages
|
| 358 |
+
output_file = Permes.get_ui_output_path() / "messages.csv"
|
| 359 |
+
messages = pd.read_csv(output_file, encoding='utf-8-sig')
|
| 360 |
+
print(f"Generated {len(messages)} messages")
|
| 361 |
+
```
|
| 362 |
+
|
| 363 |
+
### 3. Handle Emojis Properly
|
| 364 |
+
The system uses UTF-8-SIG encoding (UTF-8 with BOM) to ensure proper emoji display. When reading CSV files in your UI:
|
| 365 |
+
|
| 366 |
+
```python
|
| 367 |
+
# Always use UTF-8-SIG encoding when reading to properly handle emojis
|
| 368 |
+
messages = pd.read_csv(file_path, encoding='utf-8-sig')
|
| 369 |
+
|
| 370 |
+
# Or in JavaScript/TypeScript
|
| 371 |
+
const messages = await fs.readFile(filePath, 'utf-8');
|
| 372 |
+
```
|
| 373 |
+
|
| 374 |
+
### 4. Testing Different Configurations
|
| 375 |
+
Modify `configs/test_data/ui_test_campaigns.py` to test different:
|
| 376 |
+
- Number of stages
|
| 377 |
+
- Instructions (campaign-wide and per-stage)
|
| 378 |
+
- Models
|
| 379 |
+
- Content types
|
| 380 |
+
- Sample examples
|
| 381 |
+
- Specific content promotion (`specific_content_id`)
|
| 382 |
+
|
| 383 |
+
## Troubleshooting
|
| 384 |
+
|
| 385 |
+
### Issue: "No previous message data found"
|
| 386 |
+
This is **expected** for stage 1 or when running the first stage of a new campaign. Previous stage data is only needed for stages 2+.
|
| 387 |
+
|
| 388 |
+
### Issue: Emojis not displaying correctly
|
| 389 |
+
Ensure you're reading CSV files with UTF-8-SIG encoding (UTF-8 with BOM):
|
| 390 |
+
```python
|
| 391 |
+
pd.read_csv(file_path, encoding='utf-8-sig')
|
| 392 |
+
```
|
| 393 |
+
|
| 394 |
+
### Issue: CSV file not found
|
| 395 |
+
Make sure the output directory exists:
|
| 396 |
+
```python
|
| 397 |
+
from ai_messaging_system_v2.Messaging_system.Permes import Permes
|
| 398 |
+
print(Permes.get_ui_output_path())
|
| 399 |
+
```
|
| 400 |
+
|
| 401 |
+
### Issue: Old data in CSV
|
| 402 |
+
Clear the output directory before starting a new campaign:
|
| 403 |
+
```python
|
| 404 |
+
Permes.clear_ui_output()
|
| 405 |
+
```
|
| 406 |
+
|
| 407 |
+
### Issue: "specific_content_id X not found in content database"
|
| 408 |
+
This error means the content_id doesn't exist for your brand:
|
| 409 |
+
- Verify the content_id exists in your Snowflake content table
|
| 410 |
+
- Ensure you're using the correct content_id for the brand
|
| 411 |
+
- Check that the content is active and not deleted
|
| 412 |
+
- Content IDs are brand-specific (Drumeo content ≠ Pianote content)
|
| 413 |
+
|
| 414 |
+
## Integration Example
|
| 415 |
+
|
| 416 |
+
Here's a complete example of how your UI might integrate with the system:
|
| 417 |
+
|
| 418 |
+
```python
|
| 419 |
+
from ai_messaging_system_v2.Messaging_system.Permes import Permes
|
| 420 |
+
from ai_messaging_system_v2.configs.config_loader import get_system_config
|
| 421 |
+
import pandas as pd
|
| 422 |
+
|
| 423 |
+
class MessageGeneratorUI:
|
| 424 |
+
def __init__(self, session):
|
| 425 |
+
self.permes = Permes()
|
| 426 |
+
self.system_config = get_system_config()
|
| 427 |
+
self.session = session
|
| 428 |
+
|
| 429 |
+
def start_new_campaign(self):
|
| 430 |
+
"""Clear output when starting a new campaign"""
|
| 431 |
+
Permes.clear_ui_output()
|
| 432 |
+
|
| 433 |
+
def generate_stage(self, stage, users_df, ui_params):
|
| 434 |
+
"""Generate messages for a specific stage with UI parameters"""
|
| 435 |
+
|
| 436 |
+
messages = self.permes.create_personalize_messages(
|
| 437 |
+
session=self.session,
|
| 438 |
+
users=users_df,
|
| 439 |
+
brand=ui_params['brand'],
|
| 440 |
+
config_file=self.system_config,
|
| 441 |
+
stage=stage,
|
| 442 |
+
mode="ui",
|
| 443 |
+
|
| 444 |
+
# Parameters from UI form
|
| 445 |
+
campaign_name=ui_params['campaign_name'],
|
| 446 |
+
campaign_instructions=ui_params.get('campaign_instructions'),
|
| 447 |
+
per_message_instructions=ui_params.get('stage_instructions'),
|
| 448 |
+
model=ui_params['model'],
|
| 449 |
+
recsys_contents=ui_params['content_types'],
|
| 450 |
+
sample_example=ui_params['sample_example'],
|
| 451 |
+
segment_info=ui_params['segment_description'],
|
| 452 |
+
involve_recsys_result=ui_params['include_recommendations'],
|
| 453 |
+
personalization=ui_params['enable_personalization'],
|
| 454 |
+
specific_content_id=ui_params.get('specific_content_id') # Optional: force specific content
|
| 455 |
+
)
|
| 456 |
+
|
| 457 |
+
return messages
|
| 458 |
+
|
| 459 |
+
def get_results(self):
|
| 460 |
+
"""Read generated messages from CSV"""
|
| 461 |
+
output_file = Permes.get_ui_output_path() / "messages.csv"
|
| 462 |
+
return pd.read_csv(output_file, encoding='utf-8-sig')
|
| 463 |
+
|
| 464 |
+
# Usage in your UI
|
| 465 |
+
ui = MessageGeneratorUI(session)
|
| 466 |
+
ui.start_new_campaign()
|
| 467 |
+
|
| 468 |
+
# Generate stage 1
|
| 469 |
+
stage1_messages = ui.generate_stage(
|
| 470 |
+
stage=1,
|
| 471 |
+
users_df=selected_users,
|
| 472 |
+
ui_params={
|
| 473 |
+
'brand': 'drumeo',
|
| 474 |
+
'campaign_name': 'My Custom Campaign',
|
| 475 |
+
'model': 'gemini-2.5-flash-lite',
|
| 476 |
+
'content_types': ['workout', 'course'],
|
| 477 |
+
'sample_example': 'Header: Hi!\nMessage: Check this out!',
|
| 478 |
+
'segment_description': 'Inactive users',
|
| 479 |
+
'include_recommendations': True,
|
| 480 |
+
'enable_personalization': True,
|
| 481 |
+
'campaign_instructions': 'Be encouraging',
|
| 482 |
+
'stage_instructions': 'Focus on content',
|
| 483 |
+
'specific_content_id': None # Set to content_id to force specific content for all users
|
| 484 |
+
}
|
| 485 |
+
)
|
| 486 |
+
|
| 487 |
+
# Display in UI
|
| 488 |
+
results = ui.get_results()
|
| 489 |
+
```
|
| 490 |
+
|
| 491 |
+
## Summary
|
| 492 |
+
|
| 493 |
+
UI Mode provides a clean, local alternative to Snowflake storage for UI applications. It maintains the same workflow and data structure as production mode, but stores everything locally in CSV format with proper UTF-8 encoding for emoji support.
|
| 494 |
+
|
| 495 |
+
For questions or issues, contact: [email protected]
|
ai_messaging_system_v2/configs/README.md
ADDED
|
@@ -0,0 +1,363 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Configuration System Documentation
|
| 2 |
+
|
| 3 |
+
This directory contains the modular configuration system for the AI Messaging Platform.
|
| 4 |
+
|
| 5 |
+
## 📁 Directory Structure
|
| 6 |
+
|
| 7 |
+
```
|
| 8 |
+
configs/
|
| 9 |
+
├── system/
|
| 10 |
+
│ ├── __init__.py
|
| 11 |
+
│ └── system_config.py # System-wide settings
|
| 12 |
+
├── singeo/
|
| 13 |
+
│ ├── __init__.py
|
| 14 |
+
│ └── campaigns.py # Singeo brand campaigns
|
| 15 |
+
├── drumeo/
|
| 16 |
+
│ ├── __init__.py
|
| 17 |
+
│ └── campaigns.py # Drumeo brand campaigns
|
| 18 |
+
├── guitareo/
|
| 19 |
+
│ ├── __init__.py
|
| 20 |
+
│ └── campaigns.py # Guitareo brand campaigns
|
| 21 |
+
├── pianote/
|
| 22 |
+
│ ├── __init__.py
|
| 23 |
+
│ └── campaigns.py # Pianote brand campaigns
|
| 24 |
+
├── test_data/
|
| 25 |
+
│ ├── __init__.py
|
| 26 |
+
│ └── test_config.py # Test campaign data
|
| 27 |
+
├── config_loader.py # Main config loader
|
| 28 |
+
└── README.md # This file
|
| 29 |
+
```
|
| 30 |
+
|
| 31 |
+
## 🚀 Quick Start
|
| 32 |
+
|
| 33 |
+
### Loading System Configuration
|
| 34 |
+
|
| 35 |
+
```python
|
| 36 |
+
from configs.config_loader import get_system_config
|
| 37 |
+
|
| 38 |
+
system_config = get_system_config()
|
| 39 |
+
print(system_config['header_limit']) # 30
|
| 40 |
+
print(system_config['message_limit']) # 110
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
### Loading Campaign Configuration
|
| 44 |
+
|
| 45 |
+
```python
|
| 46 |
+
from configs.config_loader import get_campaign_config
|
| 47 |
+
|
| 48 |
+
# Production mode
|
| 49 |
+
campaign = get_campaign_config("drumeo", "re_engagement", test_mode=False)
|
| 50 |
+
|
| 51 |
+
# Test mode (uses test campaign name)
|
| 52 |
+
campaign = get_campaign_config("drumeo", "re_engagement", test_mode=True)
|
| 53 |
+
|
| 54 |
+
# Access stage configuration
|
| 55 |
+
stage_1_config = campaign["1"]
|
| 56 |
+
print(stage_1_config["model"]) # gemini-2.5-flash-lite
|
| 57 |
+
|
| 58 |
+
# Access campaign-wide instructions
|
| 59 |
+
campaign_instructions = campaign.get("campaign_instructions")
|
| 60 |
+
|
| 61 |
+
# Access stage-specific instructions
|
| 62 |
+
stage_instructions = stage_1_config.get("instructions")
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
### Helper Functions
|
| 66 |
+
|
| 67 |
+
```python
|
| 68 |
+
from configs.config_loader import get_all_brands, get_campaign_types
|
| 69 |
+
|
| 70 |
+
# Get all available brands
|
| 71 |
+
brands = get_all_brands()
|
| 72 |
+
# ['singeo', 'drumeo', 'guitareo', 'pianote']
|
| 73 |
+
|
| 74 |
+
# Get campaign types for a brand
|
| 75 |
+
campaign_types = get_campaign_types("drumeo")
|
| 76 |
+
# ['re_engagement']
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
## ✨ Configuration Features
|
| 80 |
+
|
| 81 |
+
### 1. System Configuration
|
| 82 |
+
|
| 83 |
+
Located in `configs/system/system_config.py`, contains:
|
| 84 |
+
- User profile features to extract
|
| 85 |
+
- Message character limits (header/message)
|
| 86 |
+
- LLM model configurations (OpenAI, Google, Claude)
|
| 87 |
+
- AI jargon filters
|
| 88 |
+
- Banned content IDs
|
| 89 |
+
- Model failure thresholds
|
| 90 |
+
|
| 91 |
+
### 2. Brand Campaign Configuration
|
| 92 |
+
|
| 93 |
+
Each brand has its own `campaigns.py` file containing:
|
| 94 |
+
- Campaign metadata (view name, campaign name, brand)
|
| 95 |
+
- Campaign-wide instructions (optional)
|
| 96 |
+
- Stage configurations (1-11) with:
|
| 97 |
+
- Stage-specific settings
|
| 98 |
+
- Per-message instructions (optional)
|
| 99 |
+
- Model selection
|
| 100 |
+
- Personalization settings
|
| 101 |
+
- Content recommendation types
|
| 102 |
+
|
| 103 |
+
### 3. Campaign-Wide Instructions
|
| 104 |
+
|
| 105 |
+
Apply instructions to ALL stages of a campaign:
|
| 106 |
+
|
| 107 |
+
```python
|
| 108 |
+
# In configs/{brand}/campaigns.py
|
| 109 |
+
CAMPAIGNS = {
|
| 110 |
+
"re_engagement": {
|
| 111 |
+
"campaign_view": "drumeo_re_engagement",
|
| 112 |
+
"campaign_name": "Drumeo - Inactive Members (for 3 days) - Re-engagement",
|
| 113 |
+
"brand": "drumeo",
|
| 114 |
+
|
| 115 |
+
# Campaign-wide instructions applied to all stages
|
| 116 |
+
"campaign_instructions": "Keep the tone upbeat and encouraging. Focus on getting users back to practicing.",
|
| 117 |
+
|
| 118 |
+
"1": {
|
| 119 |
+
"stage": 1,
|
| 120 |
+
# ... stage config
|
| 121 |
+
}
|
| 122 |
+
}
|
| 123 |
+
}
|
| 124 |
+
```
|
| 125 |
+
|
| 126 |
+
### 4. Per-Message (Stage-Specific) Instructions
|
| 127 |
+
|
| 128 |
+
Add instructions for individual stages:
|
| 129 |
+
|
| 130 |
+
```python
|
| 131 |
+
"1": {
|
| 132 |
+
"stage": 1,
|
| 133 |
+
"segment_info": "Students who haven't practiced...",
|
| 134 |
+
"model": "gemini-2.5-flash-lite",
|
| 135 |
+
|
| 136 |
+
# Stage-specific instructions
|
| 137 |
+
"instructions": "For stage 1, emphasize the recommended content and make it feel fresh.",
|
| 138 |
+
}
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
### 5. Specific Content Promotion
|
| 142 |
+
|
| 143 |
+
Force a specific content for all users in a stage (overrides AI recommendations):
|
| 144 |
+
|
| 145 |
+
```python
|
| 146 |
+
"1": {
|
| 147 |
+
"stage": 1,
|
| 148 |
+
"segment_info": "Students who haven't practiced...",
|
| 149 |
+
"involve_recsys_result": True, # Must be True to recommend content
|
| 150 |
+
|
| 151 |
+
# NEW: Force specific content for ALL users at this stage
|
| 152 |
+
"specific_content_id": 12345, # Set to content_id, or None for regular AI recommendations
|
| 153 |
+
|
| 154 |
+
"model": "gemini-2.5-flash-lite",
|
| 155 |
+
"instructions": "Emphasize this special featured content",
|
| 156 |
+
}
|
| 157 |
+
```
|
| 158 |
+
|
| 159 |
+
**How it works:**
|
| 160 |
+
- When `specific_content_id` is set, ALL users receive the same content recommendation
|
| 161 |
+
- Overrides the AI-powered recommendation system entirely
|
| 162 |
+
- No LLM tokens used for content selection (cost savings)
|
| 163 |
+
- System validates that content exists in database for the brand
|
| 164 |
+
- Raises error if content_id not found (no silent failures)
|
| 165 |
+
- Set to `None` (default) to use regular personalized recommendations
|
| 166 |
+
|
| 167 |
+
**Use cases:**
|
| 168 |
+
- Promote a specific course, workout, or song for a marketing campaign
|
| 169 |
+
- Feature new content to all users in a particular stage
|
| 170 |
+
- A/B test specific content performance
|
| 171 |
+
- Seasonal or event-based content promotion
|
| 172 |
+
|
| 173 |
+
**Priority Order:**
|
| 174 |
+
1. If `specific_content_id` is set → Use that content for all users
|
| 175 |
+
2. Else if `involve_recsys_result=True` → Use AI/random recommendations
|
| 176 |
+
3. Else → Use Homepage recommender (redirect to "For You" page)
|
| 177 |
+
|
| 178 |
+
### 6. Instruction Injection in Prompts
|
| 179 |
+
|
| 180 |
+
Both `campaign_instructions` and `per_message_instructions` are automatically injected into LLM prompts **after the user profile section**, providing contextual guidance while maintaining the prompt structure.
|
| 181 |
+
|
| 182 |
+
## 📝 How to Add/Modify Configurations
|
| 183 |
+
|
| 184 |
+
### Adding a New Brand
|
| 185 |
+
|
| 186 |
+
1. Create directory: `configs/newbrand/`
|
| 187 |
+
2. Add `__init__.py` (empty file)
|
| 188 |
+
3. Create `campaigns.py`:
|
| 189 |
+
|
| 190 |
+
```python
|
| 191 |
+
"""
|
| 192 |
+
NewBrand Campaign Configurations
|
| 193 |
+
"""
|
| 194 |
+
|
| 195 |
+
CAMPAIGNS = {
|
| 196 |
+
"re_engagement": {
|
| 197 |
+
"campaign_view": "newbrand_re_engagement",
|
| 198 |
+
"campaign_name": "NewBrand - Inactive Members - Re-engagement",
|
| 199 |
+
"brand": "newbrand",
|
| 200 |
+
"campaign_instructions": None,
|
| 201 |
+
"1": {
|
| 202 |
+
# ... stage configs
|
| 203 |
+
}
|
| 204 |
+
}
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
def get_campaigns():
|
| 208 |
+
return CAMPAIGNS
|
| 209 |
+
```
|
| 210 |
+
|
| 211 |
+
4. Update `configs/config_loader.py`:
|
| 212 |
+
|
| 213 |
+
```python
|
| 214 |
+
from .newbrand.campaigns import get_campaigns as get_newbrand_campaigns
|
| 215 |
+
|
| 216 |
+
BRAND_CAMPAIGNS = {
|
| 217 |
+
# ... existing brands
|
| 218 |
+
"newbrand": get_newbrand_campaigns(),
|
| 219 |
+
}
|
| 220 |
+
```
|
| 221 |
+
|
| 222 |
+
### Adding a New Campaign Type
|
| 223 |
+
|
| 224 |
+
Edit `configs/{brand}/campaigns.py`:
|
| 225 |
+
|
| 226 |
+
```python
|
| 227 |
+
CAMPAIGNS = {
|
| 228 |
+
"re_engagement": {
|
| 229 |
+
# ... existing config
|
| 230 |
+
},
|
| 231 |
+
"new_campaign_type": {
|
| 232 |
+
"campaign_view": "brand_new_campaign",
|
| 233 |
+
"campaign_name": "Brand - New Campaign",
|
| 234 |
+
"brand": "brand",
|
| 235 |
+
"campaign_instructions": "Optional campaign-wide instructions",
|
| 236 |
+
"1": {
|
| 237 |
+
"stage": 1,
|
| 238 |
+
"segment_info": "...",
|
| 239 |
+
"instructions": "Optional stage-specific instructions",
|
| 240 |
+
"specific_content_id": None, # Optional: Force specific content for all users
|
| 241 |
+
# ... other stage settings
|
| 242 |
+
}
|
| 243 |
+
}
|
| 244 |
+
}
|
| 245 |
+
```
|
| 246 |
+
|
| 247 |
+
### Modifying System Settings
|
| 248 |
+
|
| 249 |
+
Edit `configs/system/system_config.py`:
|
| 250 |
+
|
| 251 |
+
```python
|
| 252 |
+
SYSTEM_CONFIG = {
|
| 253 |
+
"header_limit": 30, # Modify as needed
|
| 254 |
+
"message_limit": 110,
|
| 255 |
+
"openai_models": ["gpt-4o-mini", "gpt-5-nano"],
|
| 256 |
+
# ... other settings
|
| 257 |
+
}
|
| 258 |
+
```
|
| 259 |
+
|
| 260 |
+
### Adding or Modifying Instructions
|
| 261 |
+
|
| 262 |
+
Edit the brand's `campaigns.py`:
|
| 263 |
+
|
| 264 |
+
```python
|
| 265 |
+
CAMPAIGNS = {
|
| 266 |
+
"re_engagement": {
|
| 267 |
+
# Add or modify campaign-wide instructions
|
| 268 |
+
"campaign_instructions": "Always be encouraging and avoid negative phrasing",
|
| 269 |
+
|
| 270 |
+
"1": {
|
| 271 |
+
# Add or modify stage-specific instructions
|
| 272 |
+
"instructions": "Focus heavily on the recommended content",
|
| 273 |
+
# ... rest of config
|
| 274 |
+
},
|
| 275 |
+
"2": {
|
| 276 |
+
"instructions": "Emphasize the time since last login",
|
| 277 |
+
# ... rest of config
|
| 278 |
+
}
|
| 279 |
+
}
|
| 280 |
+
}
|
| 281 |
+
```
|
| 282 |
+
|
| 283 |
+
## 🔄 Dynamic Chunking System
|
| 284 |
+
|
| 285 |
+
The parallel processing system includes smart chunking that maximizes worker utilization:
|
| 286 |
+
|
| 287 |
+
**Rules:**
|
| 288 |
+
- Minimum chunk size: 20 users
|
| 289 |
+
- Maximum chunk size: 1000 users
|
| 290 |
+
- Users are distributed equally among available workers
|
| 291 |
+
|
| 292 |
+
**Examples:**
|
| 293 |
+
- 1000 users, 5 workers → 5 chunks of 200 each (uses all workers)
|
| 294 |
+
- 150 users, 5 workers → 5 chunks of 30 each (uses all workers)
|
| 295 |
+
- 50 users, 5 workers → 2 chunks of 25 each (uses 2 workers, respects the 20-user minimum)
|
| 296 |
+
- 15 users, 5 workers → 1 chunk of 15 (below minimum, single chunk)
|
| 297 |
+
|
| 298 |
+
This is handled automatically in `generate_message_parallel.py` via the `split_into_chunks()` function.
|
| 299 |
+
|
| 300 |
+
## 🧪 Testing
|
| 301 |
+
|
| 302 |
+
Enable test mode to use test data:
|
| 303 |
+
|
| 304 |
+
```python
|
| 305 |
+
from ai_messaging_system_v2.generate_message_parallel import generate_messages_parallel
|
| 306 |
+
|
| 307 |
+
results = generate_messages_parallel(
|
| 308 |
+
message_number=1,
|
| 309 |
+
test_mode=True, # Uses test campaign name and staff data
|
| 310 |
+
brand="drumeo",
|
| 311 |
+
campaign_type="re_engagement",
|
| 312 |
+
chunk_size=1000, # Maximum, actual size determined dynamically
|
| 313 |
+
max_workers=5
|
| 314 |
+
)
|
| 315 |
+
```
|
| 316 |
+
|
| 317 |
+
Test data configuration: `configs/test_data/test_config.py`
|
| 318 |
+
|
| 319 |
+
## 🏗️ System Architecture
|
| 320 |
+
|
| 321 |
+
### Configuration Flow
|
| 322 |
+
|
| 323 |
+
1. **Load System Config**: `get_system_config()` returns system-wide settings
|
| 324 |
+
2. **Load Campaign Config**: `get_campaign_config(brand, type, test_mode)` retrieves brand+campaign config
|
| 325 |
+
3. **Extract Stage Data**: Campaign config contains all 11 stages with metadata
|
| 326 |
+
4. **Pass Instructions**: Both campaign and stage instructions flow through:
|
| 327 |
+
- `generate_message_parallel.py` → `read_data()`
|
| 328 |
+
- `Permes.py` → `create_personalize_messages()`
|
| 329 |
+
- `CoreConfig.py` → stores as attributes
|
| 330 |
+
- `PromptGenerator.py` → `get_additional_instructions()` injects into prompts
|
| 331 |
+
|
| 332 |
+
### Instruction Injection Flow
|
| 333 |
+
|
| 334 |
+
```
|
| 335 |
+
Campaign Config
|
| 336 |
+
↓
|
| 337 |
+
campaign_instructions (all stages) + per_message_instructions (specific stage)
|
| 338 |
+
↓
|
| 339 |
+
Passed to Permes.create_personalize_messages()
|
| 340 |
+
↓
|
| 341 |
+
Stored in CoreConfig
|
| 342 |
+
↓
|
| 343 |
+
PromptGenerator.get_additional_instructions()
|
| 344 |
+
↓
|
| 345 |
+
Injected after user profile section in prompt
|
| 346 |
+
↓
|
| 347 |
+
Sent to LLM
|
| 348 |
+
```
|
| 349 |
+
|
| 350 |
+
## 🎯 Benefits
|
| 351 |
+
|
| 352 |
+
1. **Modularity**: Configurations separated by purpose (system/brand/test)
|
| 353 |
+
2. **Scalability**: Easy to add new brands or campaigns
|
| 354 |
+
3. **Maintainability**: Changes isolated to specific files
|
| 355 |
+
4. **Extensibility**: New features can be added per campaign or stage
|
| 356 |
+
5. **Clarity**: Each file serves a single, clear purpose
|
| 357 |
+
6. **Flexibility**: Instructions can be campaign-wide or stage-specific
|
| 358 |
+
|
| 359 |
+
## 📞 Support
|
| 360 |
+
|
| 361 |
+
For questions about the configuration system:
|
| 362 |
+
- Contact: [email protected]
|
| 363 |
+
- Main project documentation: `ai_messaging_system_v2/README.md`
|
Messaging_system/StoreLayer.py → ai_messaging_system_v2/configs/__init__.py
RENAMED
|
File without changes
|
ai_messaging_system_v2/configs/config_loader.py
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Configuration Loader
|
| 3 |
+
|
| 4 |
+
This module serves as the main entry point for loading all configurations.
|
| 5 |
+
It consolidates system settings, brand campaigns, and test data from the
|
| 6 |
+
modular config structure.
|
| 7 |
+
|
| 8 |
+
Usage:
|
| 9 |
+
from configs.config_loader import get_system_config, get_campaign_config
|
| 10 |
+
|
| 11 |
+
# Get system configuration
|
| 12 |
+
system_config = get_system_config()
|
| 13 |
+
|
| 14 |
+
# Get brand campaign configuration
|
| 15 |
+
campaign_config = get_campaign_config("drumeo", "re_engagement", test_mode=False)
|
| 16 |
+
|
| 17 |
+
Migration from embedded_configs.py:
|
| 18 |
+
Old: from embedded_configs import SYSTEM_CONFIG, get_campaign_config
|
| 19 |
+
New: from configs.config_loader import get_system_config, get_campaign_config
|
| 20 |
+
SYSTEM_CONFIG = get_system_config()
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
import copy
|
| 24 |
+
|
| 25 |
+
# Import system configuration
|
| 26 |
+
from .system.system_config import SYSTEM_CONFIG as _SYSTEM_CONFIG
|
| 27 |
+
|
| 28 |
+
# Import brand campaign configurations
|
| 29 |
+
from .singeo.campaigns import get_campaigns as get_singeo_campaigns
|
| 30 |
+
from .drumeo.campaigns import get_campaigns as get_drumeo_campaigns
|
| 31 |
+
from .guitareo.campaigns import get_campaigns as get_guitareo_campaigns
|
| 32 |
+
from .pianote.campaigns import get_campaigns as get_pianote_campaigns
|
| 33 |
+
|
| 34 |
+
# Import test data
|
| 35 |
+
from .test_data.test_config import TEST_CAMPAIGN_NAME, TEST_STAFF_DATA
|
| 36 |
+
from .test_data.ui_test_campaigns import get_ui_test_campaigns
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# Build the unified BRAND_CAMPAIGNS dictionary
|
| 40 |
+
BRAND_CAMPAIGNS = {
|
| 41 |
+
"singeo": get_singeo_campaigns(),
|
| 42 |
+
"drumeo": get_drumeo_campaigns(),
|
| 43 |
+
"guitareo": get_guitareo_campaigns(),
|
| 44 |
+
"pianote": get_pianote_campaigns(),
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
# UI test campaigns (for UI mode testing)
|
| 48 |
+
UI_TEST_CAMPAIGNS = get_ui_test_campaigns()
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def get_system_config():
|
| 52 |
+
"""
|
| 53 |
+
Retrieve the system configuration.
|
| 54 |
+
|
| 55 |
+
Returns:
|
| 56 |
+
dict: System configuration dictionary containing:
|
| 57 |
+
- user_info_features: List of user profile features
|
| 58 |
+
- interaction_features: List of interaction tracking features
|
| 59 |
+
- header_limit/message_limit: Character limits
|
| 60 |
+
- Model configurations (OpenAI, Google, Claude, etc.)
|
| 61 |
+
- AI jargon filters
|
| 62 |
+
- Banned content IDs
|
| 63 |
+
|
| 64 |
+
Example:
|
| 65 |
+
>>> config = get_system_config()
|
| 66 |
+
>>> print(config['header_limit'])
|
| 67 |
+
30
|
| 68 |
+
"""
|
| 69 |
+
return _SYSTEM_CONFIG
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def get_campaign_config(brand, campaign_type="re_engagement", test_mode=False, mode="production"):
    """
    Retrieve the campaign configuration for a specific brand.

    With test_mode=True the campaign_name is swapped for TEST_CAMPAIGN_NAME.
    With mode="ui" the configuration comes from the UI test campaigns instead
    of the brand registry (in that case ``brand`` and ``test_mode`` are not
    consulted).

    Args:
        brand (str): Brand name (e.g., "singeo", "guitareo", "pianote", "drumeo")
        campaign_type (str): Campaign type (default: "re_engagement")
        test_mode (bool): If True, replaces campaign_name with the test campaign name
        mode (str): Operating mode - "production", "test", or "ui" (default: "production")

    Returns:
        dict: Campaign configuration. Always a deep copy, so callers may
        mutate it freely without affecting the registered configuration.
        Contains campaign_view, campaign_name, brand, optional
        campaign_instructions, and the per-stage configurations.

    Raises:
        KeyError: If the brand or campaign type is not registered.

    Examples:
        >>> # Get production config
        >>> config = get_campaign_config("singeo", mode="production")
        >>> print(config["campaign_name"])
        'Singeo - Inactive Members (for 3 days) - Re-engagement'

        >>> # Get test config
        >>> config = get_campaign_config("drumeo", "re_engagement", test_mode=True)
        >>> print(config["campaign_name"])
        'musora-staff-test-campaign'

        >>> # Get UI config
        >>> config = get_campaign_config("drumeo", "re_engagement", mode="ui")
        >>> print(config["campaign_name"])
        'UI-Test-Campaign-Re-engagement'
    """
    # UI mode short-circuits the brand registry entirely.
    if mode == "ui":
        if campaign_type not in UI_TEST_CAMPAIGNS:
            raise KeyError(
                f"Campaign type '{campaign_type}' not found in UI test campaigns. "
                f"Available types: {list(UI_TEST_CAMPAIGNS.keys())}"
            )
        return copy.deepcopy(UI_TEST_CAMPAIGNS[campaign_type])

    # Production / test mode: resolve through the brand registry.
    if brand not in BRAND_CAMPAIGNS:
        raise KeyError(
            f"Brand '{brand}' not found. Available brands: {list(BRAND_CAMPAIGNS.keys())}"
        )

    brand_campaigns = BRAND_CAMPAIGNS[brand]
    if campaign_type not in brand_campaigns:
        raise KeyError(
            f"Campaign type '{campaign_type}' not found for brand '{brand}'. "
            f"Available types: {list(brand_campaigns.keys())}"
        )

    # Deep copy so callers never mutate the shared registry entry.
    config = copy.deepcopy(brand_campaigns[campaign_type])

    # Test mode only rewrites the campaign identifier; all stage
    # configuration stays as in production.
    if test_mode:
        config["campaign_name"] = TEST_CAMPAIGN_NAME

    return config
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def get_all_brands():
    """
    List every brand registered in BRAND_CAMPAIGNS.

    Returns:
        list: Brand name strings, in registration order.

    Example:
        >>> brands = get_all_brands()
        >>> print(brands)
        ['singeo', 'drumeo', 'guitareo', 'pianote']
    """
    return [*BRAND_CAMPAIGNS]
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def get_campaign_types(brand):
    """
    List the campaign types available for one brand.

    Args:
        brand (str): Brand name

    Returns:
        list: Campaign type strings registered for the brand

    Raises:
        KeyError: If the brand is not registered

    Example:
        >>> campaign_types = get_campaign_types("drumeo")
        >>> print(campaign_types)
        ['re_engagement']
    """
    if brand not in BRAND_CAMPAIGNS:
        raise KeyError(
            f"Brand '{brand}' not found. Available brands: {list(BRAND_CAMPAIGNS.keys())}"
        )
    # Iterating a dict yields its keys, so list(...) gives the type names.
    return list(BRAND_CAMPAIGNS[brand])
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
# Backward compatibility exports: older code imported SYSTEM_CONFIG directly
# from this package instead of calling get_system_config().
SYSTEM_CONFIG = _SYSTEM_CONFIG

# Explicit public API of this package (controls `from package import *`).
__all__ = [
    'get_system_config',
    'get_campaign_config',
    'get_all_brands',
    'get_campaign_types',
    'SYSTEM_CONFIG',
    'BRAND_CAMPAIGNS',
    'UI_TEST_CAMPAIGNS',
    'TEST_CAMPAIGN_NAME',
    'TEST_STAFF_DATA',
]
|
ai_messaging_system_v2/configs/drumeo/__init__.py
ADDED
|
File without changes
|