# AI Integration in Telegram Bots: ChatGPT, Claude & Best Practices 2024
Introduction to AI-Powered Bots
Artificial Intelligence integration transforms regular Telegram bots into smart assistants capable of understanding context, generating content, and solving complex tasks.
Popular AI APIs for Integration
1. OpenAI API (ChatGPT)
**Advantages:**
- Excellent natural language understanding
- Multiple models (GPT-3.5, GPT-4)
- Great documentation
- Function calling support
**Setup:**
```python
import openai
from openai import AsyncOpenAI

# NOTE: load the real key from an environment variable in production.
client = AsyncOpenAI(api_key="your-api-key")


async def get_ai_response(user_message, context=None):
    """Send *user_message* (plus optional prior *context*) to ChatGPT.

    Args:
        user_message: The user's text prompt.
        context: Optional list of prior messages in OpenAI chat format
            ({"role": ..., "content": ...}). Defaults to no history.

    Returns:
        The assistant's reply text.
    """
    # A mutable default ([]) is shared across calls and would accumulate
    # messages from unrelated conversations; use None as the sentinel.
    history = context if context is not None else []
    messages = history + [{"role": "user", "content": user_message}]
    response = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
        max_tokens=1000,
        temperature=0.7,
    )
    return response.choices[0].message.content
```
2. Anthropic Claude
**Advantages:**
- Safer and more ethical by design
- Excellent with long texts
- Better instruction following
- Fewer hallucinations
**Setup:**
```python
import anthropic

# NOTE: load the real key from an environment variable in production.
client = anthropic.AsyncAnthropic(api_key="your-api-key")


async def get_claude_response(user_message, context=""):
    """Send *user_message* to Claude, optionally prefixed with *context*.

    Args:
        user_message: The user's text prompt.
        context: Optional free-form text prepended to the prompt.

    Returns:
        Claude's reply text.
    """
    # Only prepend context when it is non-empty, so an empty context does
    # not produce a prompt that starts with two blank lines.
    prompt = f"{context}\n\n{user_message}" if context else user_message
    response = await client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1000,
        messages=[
            {"role": "user", "content": prompt}
        ],
    )
    return response.content[0].text
```
Creating an AI Bot
Basic Architecture
```python
import asyncio
from aiogram import Bot, Dispatcher, F
from aiogram.types import Message
from aiogram.filters import Command
class AIBot:
    """Telegram bot that answers free-text messages with ChatGPT,
    keeping a short per-user conversation history in memory."""

    def __init__(self, bot_token, openai_key):
        self.bot = Bot(token=bot_token)
        self.dp = Dispatcher()
        self.openai_client = AsyncOpenAI(api_key=openai_key)
        self.user_contexts = {}  # user_id -> list of chat messages (context)
        self.setup_handlers()

    def setup_handlers(self):
        """Register the /start and free-text message handlers."""

        @self.dp.message(Command('start'))
        async def start_handler(message: Message):
            # /start resets the conversation history for this user.
            self.user_contexts[message.from_user.id] = []
            await message.answer(
                "🤖 Hello! I'm an AI assistant powered by ChatGPT.\n"
                "Ask me anything!"
            )

        @self.dp.message(F.text)
        async def ai_handler(message: Message):
            user_id = message.from_user.id
            # Show a typing indicator while the model is generating.
            await self.bot.send_chat_action(message.chat.id, "typing")
            try:
                # Get this user's context and append their new message.
                context = self.user_contexts.get(user_id, [])
                context.append({"role": "user", "content": message.text})
                # Keep only the last 10 messages to bound token usage.
                context = context[-10:]
                ai_response = await self.get_ai_response(context)
                # Remember the assistant's reply for follow-up questions.
                context.append({"role": "assistant", "content": ai_response})
                self.user_contexts[user_id] = context
                await message.answer(ai_response)
            except Exception as e:
                await message.answer("❌ Error processing your request")
                print(f"Error: {e}")

    async def get_ai_response(self, context):
        """Call ChatGPT with a fixed system prompt plus *context*."""
        system_prompt = {
            "role": "system",
            "content": "You are a helpful AI assistant. Answer concisely and helpfully."
        }
        messages = [system_prompt] + context
        response = await self.openai_client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=1000,
            temperature=0.7
        )
        return response.choices[0].message.content

    async def start_polling(self):
        """Start long-polling Telegram for updates (blocks until stopped)."""
        await self.dp.start_polling(self.bot)
# Usage
if __name__ == "__main__":
    # Replace the placeholders with real credentials before running.
    bot = AIBot("BOT_TOKEN", "OPENAI_KEY")
    asyncio.run(bot.start_polling())
```
Advanced Features
1. Function Calling
Allows the bot to perform actions:
```python
import json

# JSON schema describing the callable functions exposed to the model.
# (The original snippet never closed this list with ``]``.)
functions = [
    {
        "name": "get_weather",
        "description": "Get weather for a city",
        "parameters": {
            "type": "object",
            "properties": {
                "city": {"type": "string", "description": "City name"}
            },
            "required": ["city"]
        }
    }
]


async def get_ai_response_with_functions(self, context):
    """Ask ChatGPT for a reply, letting it call the declared functions.

    NOTE(review): ``functions`` / ``function_call`` are the legacy OpenAI
    parameters; newer SDK versions prefer ``tools`` / ``tool_choice``.
    """
    response = await self.openai_client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=context,
        functions=functions,
        function_call="auto"
    )
    message = response.choices[0].message
    if message.function_call:
        function_name = message.function_call.name
        if function_name == "get_weather":
            # Arguments arrive as a JSON-encoded string; decode before use.
            city = json.loads(message.function_call.arguments)["city"]
            weather_data = await get_weather_data(city)
            return f"Weather in {city}: {weather_data}"
    return message.content
```
2. Token Management
```python
def count_tokens(text, model="gpt-3.5-turbo"):
    """Return the number of tokens *text* occupies for *model*."""
    # The original snippet used tiktoken without importing it anywhere;
    # a local import keeps this example self-contained.
    import tiktoken

    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(text))
def trim_context(context, max_tokens=3000, count=None):
    """Drop the oldest messages until the context fits within *max_tokens*.

    Args:
        context: List of chat messages ({"role": ..., "content": ...}).
        max_tokens: Token budget for the whole context.
        count: Optional callable mapping a content string to its token
            count; defaults to :func:`count_tokens`.

    Returns:
        The (mutated) context list; at least one message is always kept.
    """
    if count is None:
        count = count_tokens
    total_tokens = sum(count(msg["content"]) for msg in context)
    # Subtract the removed message's tokens instead of re-summing the
    # whole list on every iteration (the original was O(n^2)).
    while total_tokens > max_tokens and len(context) > 1:
        removed = context.pop(0)  # remove the oldest message first
        total_tokens -= count(removed["content"])
    return context
```
Best Practices
1. Security
```python
def sanitize_input(text):
    """Strip HTML tags, collapse runs of blank lines, and cap length."""
    text = re.sub(r'<[^>]+>', '', text)  # remove HTML tags
    # str.replace treats its pattern literally, so the original
    # '\n\n\n+' argument never matched anything; a regex is needed
    # to collapse three-or-more newlines down to two.
    text = re.sub(r'\n{3,}', '\n\n', text)
    return text[:1000]  # limit length
def is_safe_request(text):
    """Return True unless *text* matches a known prompt-injection pattern."""
    dangerous_patterns = (
        r'ignore.+previous.+instructions',
        r'system.+prompt',
        r'jailbreak',
    )
    lowered = text.lower()
    for pattern in dangerous_patterns:
        if re.search(pattern, lowered):
            return False
    return True
```
2. Error Handling
```python
async def safe_ai_request(self, context, retries=3):
    """Call ChatGPT with retries, returning an error string on failure.

    Rate-limit errors are retried with exponential backoff; connection
    errors are retried up to *retries* times; any other error aborts
    immediately with its message.
    """
    for attempt in range(retries):
        try:
            response = await self.openai_client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=context,
                max_tokens=1000,
                timeout=30  # per-request timeout in seconds
            )
            return response.choices[0].message.content
        except openai.RateLimitError:
            await asyncio.sleep(2 ** attempt)  # exponential backoff
        except openai.APIConnectionError:
            if attempt == retries - 1:
                return "❌ Cannot connect to AI service"
        except Exception as e:
            return f"❌ Error: {str(e)}"
    # Fix: the original fell off the loop and implicitly returned None
    # when every attempt was rate-limited; report the failure explicitly.
    return "❌ AI service is overloaded, please try again later"
```
3. Caching
```python
import hashlib

# NOTE: functools.lru_cache does NOT work on async functions — it would
# cache the coroutine object itself, which can only be awaited once
# (the second call would raise). Use an explicit dict keyed by the
# prompt hash instead.
_response_cache = {}


async def cached_ai_response(prompt_hash):
    """Return the cached response for *prompt_hash*, or None if absent."""
    return _response_cache.get(prompt_hash)


def get_prompt_hash(prompt):
    """Stable cache key for a prompt (md5 is fine for non-security use)."""
    return hashlib.md5(prompt.encode()).hexdigest()
```
Monitoring and Analytics
```python
import logging
class AIBotAnalytics:
    """In-memory counters for monitoring AI requests and errors."""

    def __init__(self):
        self.request_count = 0  # total successful AI requests
        self.token_usage = 0    # cumulative tokens consumed
        self.error_count = 0    # total failed requests

    def log_request(self, user_id, tokens_used, response_time):
        """Record one successful AI request and log its stats."""
        self.request_count += 1
        self.token_usage += tokens_used
        # Lazy %-args: the message is only formatted if the record is emitted.
        logging.info(
            "AI Request - User: %s, Tokens: %s, Time: %sms",
            user_id, tokens_used, response_time,
        )

    def log_error(self, error_type, user_id):
        """Record one failed AI request and log the error type."""
        self.error_count += 1
        logging.error("AI Error - Type: %s, User: %s", error_type, user_id)
```
Conclusion
AI integration opens limitless possibilities for Telegram bots. The key is proper context management, token optimization, and ensuring security.
**Need help with AI integration?** Contact SOI.MOI experts: [@mixvlad](https://t.me/mixvlad)