Building AI Applications with OpenAI GPT: A Developer's Guide

OpenAI's GPT (Generative Pre-trained Transformer) models make it practical to add natural-language understanding and generation to almost any product. This guide shows you how to use them to build chat, content-generation, and code-assistance applications.

Getting Started

First, let's set up our development environment:

# Create virtual environment
python -m venv ai-env
source ai-env/bin/activate  # On Windows: ai-env\Scripts\activate

# Install required packages
pip install openai python-dotenv fastapi uvicorn python-jose[cryptography] passlib[bcrypt]
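
The examples that follow load the API key from a .env file in the project root using python-dotenv. A minimal file looks like this (the value is a placeholder; keep the file out of version control):

# .env
OPENAI_API_KEY=your-api-key-here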

Basic OpenAI Integration

import os
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables from .env
load_dotenv()

# Configure the OpenAI client with the key from the environment
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

def generate_response(prompt, max_tokens=150):
    """Generate text using GPT-3"""
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=max_tokens,
            temperature=0.7
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"Error: {e}")
        return None
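
A quick way to confirm the setup works is to call the helper directly; the prompt below is just an example:

if __name__ == "__main__":
    reply = generate_response("Explain what a transformer model is in one sentence.")
    print(reply)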

Building an AI Chat Application

Let's create a FastAPI-based chat application:

import os
import uuid

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List, Optional
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

app = FastAPI()

# Models
class Message(BaseModel):
    role: str
    content: str

class Conversation(BaseModel):
    conversation_id: Optional[str] = None
    messages: List[Message]
    max_tokens: Optional[int] = 150
    temperature: Optional[float] = 0.7

class ChatResponse(BaseModel):
    response: str
    conversation_id: str

# Chat manager class
class ChatManager:
    def __init__(self):
        self.conversations = {}
    
    def create_conversation(self) -> str:
        """Create a new conversation and return its ID"""
        conversation_id = str(uuid.uuid4())
        self.conversations[conversation_id] = []
        return conversation_id
    
    def add_message(self, conversation_id: str, message: Message):
        """Add a message to a conversation"""
        if conversation_id not in self.conversations:
            raise HTTPException(status_code=404, detail="Conversation not found")
        self.conversations[conversation_id].append(message.dict())
    
    def get_conversation(self, conversation_id: str) -> List[dict]:
        """Get all messages in a conversation"""
        if conversation_id not in self.conversations:
            raise HTTPException(status_code=404, detail="Conversation not found")
        return self.conversations[conversation_id]

chat_manager = ChatManager()

@app.post("/chat/", response_model=ChatResponse)
async def chat(conversation: Conversation):
    """Chat endpoint that handles conversations with GPT"""
    try:
        # Reuse the conversation if an ID was provided, otherwise create one
        conversation_id = conversation.conversation_id or chat_manager.create_conversation()

        # Add the incoming messages to the stored conversation history
        for message in conversation.messages:
            chat_manager.add_message(conversation_id, message)

        # Generate a response from the full conversation history
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=chat_manager.get_conversation(conversation_id),
            max_tokens=conversation.max_tokens,
            temperature=conversation.temperature
        )
        
        # Extract and save assistant's response
        assistant_message = Message(
            role="assistant",
            content=response.choices[0].message.content
        )
        chat_manager.add_message(conversation_id, assistant_message)
        
        return ChatResponse(
            response=assistant_message.content,
            conversation_id=conversation_id
        )
    
    except HTTPException:
        # Preserve errors such as "conversation not found"
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/conversations/{conversation_id}")
async def get_conversation(conversation_id: str):
    """Get all messages in a conversation"""
    # ChatManager raises a 404 HTTPException if the conversation doesn't exist
    return chat_manager.get_conversation(conversation_id)
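
Assuming the code above is saved as main.py, you can start the API with uvicorn main:app --reload and exercise the chat endpoint with a small client script. This uses the requests package, which is an extra dependency, and the URL and prompt are illustrative:

import requests  # pip install requests

payload = {
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Give me three ideas for a blog post about FastAPI."}
    ]
}

resp = requests.post("http://127.0.0.1:8000/chat/", json=payload)
data = resp.json()
print(data["conversation_id"])
print(data["response"])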

AI Content Generator

Here's an example of a content generation system:

from typing import List, Dict
from openai import OpenAI

class ContentGenerator:
    def __init__(self, api_key: str):
        self.client = OpenAI(api_key=api_key)
        self.content_types = {
            'article': self._generate_article,
            'social_post': self._generate_social_post,
            'email': self._generate_email
        }
    
    def _generate_article(self, topic: str, keywords: List[str]) -> Dict:
        """Generate a blog article"""
        prompt = f"""Write a comprehensive blog article about {topic}.
        Include these keywords: {', '.join(keywords)}
        Structure the article with:
        1. Introduction
        2. Main points
        3. Conclusion"""
        
        response = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a professional content writer."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=1000,
            temperature=0.7
        )
        
        return {
            'type': 'article',
            'topic': topic,
            'content': response.choices[0].message.content,
            'keywords': keywords
        }
    
    def _generate_social_post(self, topic: str, platform: str) -> Dict:
        """Generate a social media post"""
        prompt = f"""Create a {platform} post about {topic}.
        Make it engaging and include relevant hashtags."""
        
        response = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": f"You are a {platform} expert."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=200,
            temperature=0.8
        )
        
        return {
            'type': 'social_post',
            'platform': platform,
            'content': response.choices[0].message.content,
            'topic': topic
        }
    
    def _generate_email(self, subject: str, tone: str) -> Dict:
        """Generate an email"""
        prompt = f"""Write an email about {subject}.
        Use a {tone} tone.
        Include subject line and body."""
        
        response = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a professional email writer."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=400,
            temperature=0.7
        )
        
        return {
            'type': 'email',
            'subject': subject,
            'content': response.choices[0].message.content,
            'tone': tone
        }
    
    def generate_content(self, content_type: str, **kwargs) -> Dict:
        """Generate content based on type and parameters"""
        if content_type not in self.content_types:
            raise ValueError(f"Unsupported content type: {content_type}")
        
        return self.content_types[content_type](**kwargs)

# Example usage
if __name__ == "__main__":
    generator = ContentGenerator('your-api-key')
    
    # Generate article
    article = generator.generate_content(
        'article',
        topic='Artificial Intelligence in Healthcare',
        keywords=['AI', 'healthcare', 'machine learning', 'diagnosis']
    )
    
    # Generate social post
    social_post = generator.generate_content(
        'social_post',
        topic='New Product Launch',
        platform='LinkedIn'
    )
    
    # Generate email
    email = generator.generate_content(
        'email',
        subject='Quarterly Business Update',
        tone='professional'
    )

AI Code Assistant

Let's build a code assistant that can help with programming tasks:

from openai import OpenAI

class CodeAssistant:
    def __init__(self, api_key: str):
        self.client = OpenAI(api_key=api_key)
    
    def generate_code(self, prompt: str, language: str) -> str:
        """Generate code based on natural language prompt"""
        system_prompt = f"You are an expert {language} programmer. Provide only code as response."
        
        response = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt}
            ],
            max_tokens=500,
            temperature=0.2
        )
        
        return response.choices[0].message.content
    
    def explain_code(self, code: str) -> str:
        """Explain what the code does"""
        prompt = f"Explain this code in detail:\n\n{code}"
        
        response = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a coding tutor."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=300,
            temperature=0.7
        )
        
        return response.choices[0].message.content
    
    def suggest_improvements(self, code: str) -> str:
        """Suggest improvements for the code"""
        prompt = f"""Analyze this code and suggest improvements:
        
        {code}
        
        Consider:
        1. Performance
        2. Readability
        3. Best practices
        4. Security"""
        
        response = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a senior code reviewer."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=400,
            temperature=0.7
        )
        
        return response.choices[0].message.content

# Example usage
assistant = CodeAssistant('your-api-key')

# Generate code
code = assistant.generate_code(
    "Create a function to calculate fibonacci sequence",
    "Python"
)

# Get explanation
explanation = assistant.explain_code(code)

# Get improvement suggestions
improvements = assistant.suggest_improvements(code)

Best Practices

  1. API Key Management

    • Use environment variables
    • Never expose keys in code
    • Implement key rotation
    • Use proper authentication
  2. Error Handling

    • Handle API rate limits
    • Implement retries with exponential backoff (see the sketch after this list)
    • Log errors properly
    • Provide meaningful error messages
  3. Cost Optimization

    • Monitor token usage
    • Implement caching
    • Use appropriate models
    • Set token limits
  4. Response Processing

    • Validate responses
    • Handle incomplete responses
    • Implement fallbacks
    • Format output appropriately
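
For the retry item under Error Handling, here is a minimal sketch of exponential backoff around a chat call. It assumes the v1 OpenAI SDK, which exposes RateLimitError at the package level; the model, token limit, and retry count are placeholders:

import time
from openai import OpenAI, RateLimitError

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def chat_with_retry(messages, max_retries=3):
    """Call the chat API, backing off exponentially on rate-limit errors."""
    for attempt in range(max_retries):
        try:
            response = client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=messages,
                max_tokens=150
            )
            return response.choices[0].message.content
        except RateLimitError:
            # Wait 1s, 2s, 4s, ... before trying again
            time.sleep(2 ** attempt)
    raise RuntimeError("Still rate limited after retries")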

Common Use Cases

  1. Content Generation

    • Blog articles
    • Social media posts
    • Product descriptions
    • Email templates
  2. Code Assistance

    • Code generation
    • Code review
    • Documentation
    • Debugging help
  3. Conversational AI

    • Customer support
    • Virtual assistants
    • Educational tutors
    • Interview preparation
  4. Text Analysis (see the sketch after this list)

    • Sentiment analysis
    • Content moderation
    • Text classification
    • Information extraction
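
As a concrete sketch for the Text Analysis category, sentiment analysis can be done by constraining the model to a fixed label set. The labels and prompt wording are illustrative, and the client is the one configured in the basic integration section:

def classify_sentiment(text: str) -> str:
    """Classify text as positive, negative, or neutral."""
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "Reply with exactly one word: positive, negative, or neutral."},
            {"role": "user", "content": text}
        ],
        max_tokens=3,
        temperature=0
    )
    return response.choices[0].message.content.strip().lower()

print(classify_sentiment("The onboarding flow was confusing and slow."))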

Security Considerations

  1. Input Validation

def sanitize_input(text: str) -> str:
    """Sanitize user input"""
    # Keep only printable characters
    sanitized = ''.join(char for char in text if char.isprintable())
    # Limit length
    return sanitized[:1000]

  2. Rate Limiting

from fastapi import HTTPException
from datetime import datetime, timedelta

class RateLimiter:
    def __init__(self, requests_per_minute: int):
        self.requests = {}
        self.limit = requests_per_minute
    
    def check_rate_limit(self, user_id: str) -> bool:
        now = datetime.now()
        minute_ago = now - timedelta(minutes=1)
        
        # Drop timestamps older than one minute for every user
        self.requests = {
            user: [t for t in timestamps if t > minute_ago]
            for user, timestamps in self.requests.items()
        }

        # Count this user's requests within the last minute
        user_requests = len(self.requests.get(user_id, []))
        
        if user_requests >= self.limit:
            raise HTTPException(
                status_code=429,
                detail="Rate limit exceeded"
            )
        
        # Add new request
        if user_id not in self.requests:
            self.requests[user_id] = []
        self.requests[user_id].append(now)
        
        return True
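
One way to wire the limiter into the FastAPI app from earlier is to check it before delegating to the chat endpoint. The per-minute limit and the user_id path parameter are placeholders; a real application would derive the user from authentication:

rate_limiter = RateLimiter(requests_per_minute=20)

@app.post("/chat/{user_id}", response_model=ChatResponse)
async def rate_limited_chat(user_id: str, conversation: Conversation):
    # Raises HTTP 429 if this user has exceeded the per-minute limit
    rate_limiter.check_rate_limit(user_id)
    # Delegate to the chat endpoint defined earlier
    return await chat(conversation)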

Conclusion

OpenAI's GPT models provide powerful capabilities for building AI applications:

  • Natural language processing
  • Code generation and analysis
  • Content creation
  • Conversational AI

Keep exploring new use cases and stay updated with the latest developments in AI technology.

