
OpenAI API with Python: Build AI-Powered Applications

Master the OpenAI API for building AI applications. Learn GPT-4 chat completions, embeddings, function calling, the Assistants API, and production best practices.

Moshiour Rahman

Getting Started with OpenAI API

The OpenAI API provides access to powerful language models like GPT-4 for building AI-powered applications. From chatbots to content generation, the possibilities are extensive.

Installation and Setup

Install the official SDK along with python-dotenv for loading your API key:

pip install openai python-dotenv

Store the key in a .env file so it stays out of your source code:

# .env
OPENAI_API_KEY=sk-your-api-key-here

Then load it and create a client:

from openai import OpenAI
from dotenv import load_dotenv
import os

load_dotenv()

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

Chat Completions

Basic Chat

from openai import OpenAI

client = OpenAI()

response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is machine learning?"}
    ],
    temperature=0.7,
    max_tokens=500
)

print(response.choices[0].message.content)
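Beyond the message text, the response object exposes useful metadata; the fields below are part of the openai-python v1 response types:

# Token accounting for the request
print(response.usage.prompt_tokens)      # tokens in the input messages
print(response.usage.completion_tokens)  # tokens generated in the reply
print(response.usage.total_tokens)

# Why generation stopped: "stop", "length", "tool_calls", ...
print(response.choices[0].finish_reason)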

Conversation History

class ChatBot:
    def __init__(self, system_prompt: str = "You are a helpful assistant."):
        self.client = OpenAI()
        self.messages = [{"role": "system", "content": system_prompt}]

    def chat(self, user_message: str) -> str:
        self.messages.append({"role": "user", "content": user_message})

        response = self.client.chat.completions.create(
            model="gpt-4",
            messages=self.messages,
            temperature=0.7
        )

        assistant_message = response.choices[0].message.content
        self.messages.append({"role": "assistant", "content": assistant_message})

        return assistant_message

    def clear_history(self):
        self.messages = [self.messages[0]]  # Keep system prompt

# Usage
bot = ChatBot("You are a Python expert.")
print(bot.chat("How do I read a file?"))
print(bot.chat("How do I handle errors?"))

Streaming Responses

def stream_chat(prompt: str):
    stream = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        stream=True
    )

    full_response = ""
    for chunk in stream:
        # Guard against chunks that arrive with empty choices or a None delta
        if chunk.choices and chunk.choices[0].delta.content:
            content = chunk.choices[0].delta.content
            print(content, end="", flush=True)
            full_response += content

    return full_response

# Usage
response = stream_chat("Write a haiku about programming")
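The SDK also ships an AsyncOpenAI client, so the same streaming pattern works inside asyncio applications; a brief sketch:

import asyncio
from openai import AsyncOpenAI

async def stream_chat_async(prompt: str) -> str:
    client = AsyncOpenAI()
    stream = await client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        stream=True
    )
    full_response = ""
    async for chunk in stream:
        if chunk.choices and chunk.choices[0].delta.content:
            full_response += chunk.choices[0].delta.content
    return full_response

# asyncio.run(stream_chat_async("Write a haiku about programming"))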

Function Calling

Define Functions

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City name, e.g., San Francisco"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"]
                    }
                },
                "required": ["location"]
            }
        }
    },
    {
        "type": "function",
        "function": {
            "name": "search_products",
            "description": "Search for products in the catalog",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "Search query"
                    },
                    "category": {
                        "type": "string",
                        "description": "Product category"
                    },
                    "max_price": {
                        "type": "number",
                        "description": "Maximum price"
                    }
                },
                "required": ["query"]
            }
        }
    }
]

Handle Function Calls

import json

def get_weather(location: str, unit: str = "celsius") -> dict:
    # Simulated weather data
    return {
        "location": location,
        "temperature": 22,
        "unit": unit,
        "conditions": "sunny"
    }

def search_products(query: str, category: str | None = None, max_price: float | None = None) -> list:
    # Simulated product search
    return [
        {"name": f"{query} Item 1", "price": 29.99},
        {"name": f"{query} Item 2", "price": 49.99}
    ]

def process_tool_call(tool_call):
    function_name = tool_call.function.name
    arguments = json.loads(tool_call.function.arguments)

    if function_name == "get_weather":
        return get_weather(**arguments)
    elif function_name == "search_products":
        return search_products(**arguments)

    return {"error": "Unknown function"}

def chat_with_functions(user_message: str):
    messages = [{"role": "user", "content": user_message}]

    response = client.chat.completions.create(
        model="gpt-4",
        messages=messages,
        tools=tools,
        tool_choice="auto"
    )

    message = response.choices[0].message

    # Check if function call is needed
    if message.tool_calls:
        messages.append(message)

        for tool_call in message.tool_calls:
            result = process_tool_call(tool_call)
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": json.dumps(result)
            })

        # Get final response
        final_response = client.chat.completions.create(
            model="gpt-4",
            messages=messages
        )
        return final_response.choices[0].message.content

    return message.content

# Usage
print(chat_with_functions("What's the weather in Tokyo?"))
print(chat_with_functions("Find laptops under $500"))

Embeddings

Generate Embeddings

def get_embedding(text: str, model: str = "text-embedding-3-small") -> list[float]:
    response = client.embeddings.create(
        model=model,
        input=text
    )
    return response.data[0].embedding

# Single embedding
embedding = get_embedding("Machine learning is fascinating")
print(f"Embedding dimension: {len(embedding)}")

# Batch embeddings
def get_embeddings(texts: list[str]) -> list[list[float]]:
    response = client.embeddings.create(
        model="text-embedding-3-small",
        input=texts
    )
    return [item.embedding for item in response.data]

texts = ["Hello world", "Machine learning", "Python programming"]
embeddings = get_embeddings(texts)
Semantic Search

import numpy as np

def cosine_similarity(a: list[float], b: list[float]) -> float:
    a = np.array(a)
    b = np.array(b)
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

class SemanticSearch:
    def __init__(self):
        self.documents = []
        self.embeddings = []

    def add_documents(self, docs: list[str]):
        new_embeddings = get_embeddings(docs)
        self.documents.extend(docs)
        self.embeddings.extend(new_embeddings)

    def search(self, query: str, top_k: int = 3) -> list[tuple[str, float]]:
        query_embedding = get_embedding(query)

        similarities = [
            (doc, cosine_similarity(query_embedding, emb))
            for doc, emb in zip(self.documents, self.embeddings)
        ]

        similarities.sort(key=lambda x: x[1], reverse=True)
        return similarities[:top_k]

# Usage
search = SemanticSearch()
search.add_documents([
    "Python is a programming language",
    "Machine learning uses algorithms to learn from data",
    "OpenAI creates artificial intelligence models",
    "Embeddings represent text as vectors"
])

results = search.search("How does AI work?")
for doc, score in results:
    print(f"{score:.3f}: {doc}")

Assistants API

Create Assistant

# Create assistant
assistant = client.beta.assistants.create(
    name="Code Helper",
    instructions="You are a helpful coding assistant. Help users write and debug code.",
    model="gpt-4",
    tools=[{"type": "code_interpreter"}]
)

print(f"Assistant ID: {assistant.id}")

Run Conversation

# Create thread
thread = client.beta.threads.create()

# Add message
message = client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content="Write a Python function to calculate fibonacci numbers"
)

# Run assistant
run = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant.id
)

# Wait for completion
import time

while run.status in ["queued", "in_progress"]:
    time.sleep(1)
    run = client.beta.threads.runs.retrieve(
        thread_id=thread.id,
        run_id=run.id
    )

# Get messages
messages = client.beta.threads.messages.list(thread_id=thread.id)

# Messages are returned newest first; reverse for chronological order
for msg in reversed(messages.data):
    print(f"{msg.role}: {msg.content[0].text.value}")
File Search

To ground an assistant in your own documents, upload files to a vector store and enable the file_search tool.

# Upload file
file = client.files.create(
    file=open("documentation.pdf", "rb"),
    purpose="assistants"
)

# Create vector store
vector_store = client.beta.vector_stores.create(name="Documentation")

# Add file to vector store
client.beta.vector_stores.files.create(
    vector_store_id=vector_store.id,
    file_id=file.id
)

# Create assistant with file search
assistant = client.beta.assistants.create(
    name="Documentation Helper",
    instructions="Answer questions based on the uploaded documentation.",
    model="gpt-4",
    tools=[{"type": "file_search"}],
    tool_resources={
        "file_search": {
            "vector_store_ids": [vector_store.id]
        }
    }
)

Image Generation (DALL-E)

# Generate image
response = client.images.generate(
    model="dall-e-3",
    prompt="A futuristic city with flying cars, digital art style",
    size="1024x1024",
    quality="hd",
    n=1
)

image_url = response.data[0].url
print(f"Image URL: {image_url}")

# Image variations
response = client.images.create_variation(
    model="dall-e-2",
    image=open("original.png", "rb"),
    n=2,
    size="1024x1024"
)

# Image editing
response = client.images.edit(
    model="dall-e-2",
    image=open("original.png", "rb"),
    mask=open("mask.png", "rb"),
    prompt="Add a rainbow in the sky",
    n=1,
    size="1024x1024"
)
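The URLs returned for generated images expire after a short time, so download anything you want to keep. A minimal sketch using the requests library (city.png is an arbitrary filename):

import requests

image_bytes = requests.get(image_url, timeout=30).content
with open("city.png", "wb") as f:
    f.write(image_bytes)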

Vision (GPT-4o)

import base64

def encode_image(image_path: str) -> str:
    with open(image_path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")

def analyze_image(image_path: str, prompt: str = "Describe this image"):
    base64_image = encode_image(image_path)

    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}",
                            "detail": "high"
                        }
                    }
                ]
            }
        ],
        max_tokens=500
    )

    return response.choices[0].message.content

# Analyze from URL
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://example.com/image.jpg"}
                }
            ]
        }
    ]
)

print(response.choices[0].message.content)

Error Handling

from openai import OpenAI, APIError, RateLimitError, APIConnectionError
import time

def chat_with_retry(messages: list, max_retries: int = 3):
    client = OpenAI()

    for attempt in range(max_retries):
        try:
            response = client.chat.completions.create(
                model="gpt-4",
                messages=messages
            )
            return response.choices[0].message.content

        except RateLimitError:
            wait_time = 2 ** attempt
            print(f"Rate limited. Waiting {wait_time}s...")
            time.sleep(wait_time)

        except APIConnectionError:
            print("Connection error. Retrying...")
            time.sleep(1)

        except APIError as e:
            print(f"API error: {e}")
            raise

    raise Exception("Max retries exceeded")
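Also note that the v1 SDK automatically retries certain failures (connection errors, rate limits) with exponential backoff; you can tune the retry count on the client rather than writing your own loop:

client = OpenAI(max_retries=5)  # SDK default is 2

# Or override per request:
response = client.with_options(max_retries=1).chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello"}]
)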

Cost Tracking

class CostTracker:
    # Prices per 1K tokens (approximate)
    PRICES = {
        "gpt-4": {"input": 0.03, "output": 0.06},
        "gpt-4-turbo": {"input": 0.01, "output": 0.03},
        "gpt-3.5-turbo": {"input": 0.0005, "output": 0.0015}
    }

    def __init__(self):
        self.total_cost = 0
        self.requests = []

    def track(self, response, model: str):
        usage = response.usage
        prices = self.PRICES.get(model, self.PRICES["gpt-3.5-turbo"])

        input_cost = (usage.prompt_tokens / 1000) * prices["input"]
        output_cost = (usage.completion_tokens / 1000) * prices["output"]
        total = input_cost + output_cost

        self.total_cost += total
        self.requests.append({
            "model": model,
            "input_tokens": usage.prompt_tokens,
            "output_tokens": usage.completion_tokens,
            "cost": total
        })

        return total

    def get_summary(self):
        return {
            "total_cost": f"${self.total_cost:.4f}",
            "total_requests": len(self.requests),
            "total_tokens": sum(r["input_tokens"] + r["output_tokens"] for r in self.requests)
        }
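Usage, piggybacking on the usage field returned with every completion:

tracker = CostTracker()

response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Explain decorators briefly."}]
)
print(response.choices[0].message.content)

cost = tracker.track(response, "gpt-4")
print(f"This request: ${cost:.4f}")
print(tracker.get_summary())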

Summary

Feature          | Use Case
Chat Completions | Conversations, Q&A
Function Calling | Tool integration
Embeddings       | Semantic search, RAG
Assistants       | Stateful conversations
Vision           | Image analysis
DALL-E           | Image generation

The OpenAI API provides powerful building blocks for creating sophisticated AI-powered applications across various domains.


Moshiour Rahman

Software Architect & AI Engineer

Enterprise software architect with deep expertise in financial systems, distributed architecture, and AI-powered applications. Building large-scale systems at Fortune 500 companies. Specializing in LLM orchestration, multi-agent systems, and cloud-native solutions. I share battle-tested patterns from real enterprise projects.
