Claude AI API: Build Intelligent Applications with Anthropic
Master Claude AI API for building AI applications. Learn chat completions, tool use, vision, streaming, and production best practices.
Moshiour Rahman
Advertisement
Getting Started with Claude API
Claude is Anthropic’s AI assistant, known for its helpfulness, harmlessness, and honesty. The Claude API enables developers to build powerful AI-powered applications.
Installation
pip install anthropic
import anthropic
# Build the API client once and reuse it; if api_key is omitted the SDK
# falls back to the ANTHROPIC_API_KEY environment variable.
client = anthropic.Anthropic(
api_key="your-api-key"
)
Basic Messages
Simple Request
import anthropic
# No api_key argument: the client reads ANTHROPIC_API_KEY from the environment.
client = anthropic.Anthropic()
# One-shot request: a single user turn, capped at 1024 output tokens.
message = client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=1024,
messages=[
{"role": "user", "content": "What is machine learning?"}
]
)
# content is a list of blocks; the first block holds the reply text.
print(message.content[0].text)
System Prompts
# The system prompt sets persistent behavior for the exchange and is passed
# separately from the user/assistant messages list.
message = client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=1024,
system="You are a helpful Python programming assistant. Provide clear, concise code examples.",
messages=[
{"role": "user", "content": "How do I read a CSV file?"}
]
)
print(message.content[0].text)
Multi-turn Conversations
class ChatBot:
def __init__(self, system_prompt: str = None):
self.client = anthropic.Anthropic()
self.system = system_prompt
self.messages = []
def chat(self, user_message: str) -> str:
self.messages.append({
"role": "user",
"content": user_message
})
response = self.client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=2048,
system=self.system,
messages=self.messages
)
assistant_message = response.content[0].text
self.messages.append({
"role": "assistant",
"content": assistant_message
})
return assistant_message
def clear_history(self):
self.messages = []
# Usage
# Both calls share one history, so the second question's "it" resolves
# against the first answer.
bot = ChatBot("You are a helpful coding assistant.")
print(bot.chat("How do I create a Python list?"))
print(bot.chat("How do I add items to it?"))
Streaming Responses
def stream_response(prompt: str):
    """Stream Claude's reply to *prompt*, printing text as it arrives."""
    request = {
        "model": "claude-sonnet-4-20250514",
        "max_tokens": 1024,
        "messages": [{"role": "user", "content": prompt}],
    }
    with client.messages.stream(**request) as stream:
        # text_stream yields incremental text chunks as the model generates.
        for chunk in stream.text_stream:
            print(chunk, end="", flush=True)
    # Terminate the streamed output with a newline.
    print()
# Usage
# Prints the story incrementally as tokens arrive.
stream_response("Write a short story about a robot")
# With events: inspect each raw stream event instead of the text-only stream.
def stream_with_events(prompt: str) -> None:
    """Stream a response, printing text deltas and a marker when done.

    Note: the original snippet referenced an undefined `prompt` variable;
    it is now a parameter so the snippet is runnable on its own.
    """
    with client.messages.stream(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}],
    ) as stream:
        for event in stream:
            if event.type == "content_block_delta":
                # Incremental text for the current content block.
                print(event.delta.text, end="", flush=True)
            elif event.type == "message_stop":
                # End of the whole message.
                print("\n--- Done ---")
Tool Use (Function Calling)
Define Tools
# JSON Schema tool definitions passed to the Messages API via `tools=`.
_WEATHER_TOOL = {
    "name": "get_weather",
    "description": "Get the current weather for a location",
    "input_schema": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "City name, e.g., San Francisco",
            },
            "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"],
                "description": "Temperature unit",
            },
        },
        # Only the location is mandatory; unit is optional.
        "required": ["location"],
    },
}

_SEARCH_TOOL = {
    "name": "search_database",
    "description": "Search for products in the database",
    "input_schema": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Search query",
            },
            "category": {
                "type": "string",
                "description": "Product category",
            },
            "max_results": {
                "type": "integer",
                "description": "Maximum number of results",
            },
        },
        "required": ["query"],
    },
}

tools = [_WEATHER_TOOL, _SEARCH_TOOL]
Handle Tool Calls
def get_weather(location: str, unit: str = "celsius") -> dict:
    """Return (simulated) current weather for *location*."""
    # Simulated weather data
    return {
        "location": location,
        "temperature": 22,
        "unit": unit,
        "conditions": "sunny",
    }


def search_database(query: str, category: str = None, max_results: int = 5) -> list:
    """Return a (simulated) list of products matching *query*."""
    # Simulated search
    return [
        {"name": f"{query} Product 1", "price": 29.99},
        {"name": f"{query} Product 2", "price": 49.99},
    ]


# Dispatch table: tool name (as declared in `tools`) -> local handler.
_TOOL_HANDLERS = {
    "get_weather": get_weather,
    "search_database": search_database,
}


def process_tool_call(tool_name: str, tool_input: dict):
    """Route a tool call to its handler; unknown names yield an error dict."""
    handler = _TOOL_HANDLERS.get(tool_name)
    if handler is None:
        return {"error": "Unknown tool"}
    return handler(**tool_input)
def chat_with_tools(user_message: str) -> str:
    """Run a tool-use loop until Claude produces a final text answer.

    Sends *user_message* with the `tools` definitions, executes any tool
    calls Claude requests, feeds the results back, and repeats until the
    stop reason is no longer "tool_use".
    """
    import json  # stdlib; needed for tool-result serialization (missing from the original file)

    messages = [{"role": "user", "content": user_message}]
    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        tools=tools,
        messages=messages,
    )
    # Check if Claude wants to use a tool; keep looping while it does.
    while response.stop_reason == "tool_use":
        tool_results = []
        for block in response.content:
            if block.type == "tool_use":
                result = process_tool_call(block.name, block.input)
                tool_results.append({
                    "type": "tool_result",
                    "tool_use_id": block.id,  # ties the result to the request
                    "content": json.dumps(result),
                })
        # Echo the assistant turn, then answer it with the tool results.
        messages.append({"role": "assistant", "content": response.content})
        messages.append({"role": "user", "content": tool_results})
        # Get next response
        response = client.messages.create(
            model="claude-sonnet-4-20250514",
            max_tokens=1024,
            tools=tools,
            messages=messages,
        )
    # Extract final text response
    return response.content[0].text
# Usage
# These prompts should trigger the get_weather / search_database tools.
print(chat_with_tools("What's the weather in Tokyo?"))
print(chat_with_tools("Find laptops under $500"))
Vision (Image Analysis)
import base64
import httpx
# From URL
def analyze_image_url(image_url: str, prompt: str) -> str:
    """Ask Claude about an image that is referenced by URL."""
    # Content is a list of blocks: the image first, then the question.
    image_block = {
        "type": "image",
        "source": {"type": "url", "url": image_url},
    }
    text_block = {"type": "text", "text": prompt}
    message = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": [image_block, text_block]}],
    )
    return message.content[0].text
# From file (base64)
def analyze_image_file(image_path: str, prompt: str) -> str:
    """Ask Claude about a local image file, sent inline as base64.

    Fixes vs. the original: the extension check is now case-insensitive
    (e.g. "photo.PNG" no longer falls through to JPEG) and recognizes
    WebP; any other extension still defaults to image/jpeg.
    """
    with open(image_path, "rb") as f:
        image_data = base64.standard_b64encode(f.read()).decode("utf-8")
    # Determine media type from the (lowercased) file extension.
    lowered = image_path.lower()
    if lowered.endswith(".png"):
        media_type = "image/png"
    elif lowered.endswith(".gif"):
        media_type = "image/gif"
    elif lowered.endswith(".webp"):
        media_type = "image/webp"
    else:
        media_type = "image/jpeg"  # default; covers .jpg/.jpeg
    message = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "source": {
                            "type": "base64",
                            "media_type": media_type,
                            "data": image_data,
                        },
                    },
                    {
                        "type": "text",
                        "text": prompt,
                    },
                ],
            }
        ],
    )
    return message.content[0].text
# Multiple images
def compare_images(image_urls: list[str]) -> str:
    """Send several images in one turn and ask Claude to compare them."""
    # One image block per URL, followed by a single instruction block.
    blocks = [
        {"type": "image", "source": {"type": "url", "url": link}}
        for link in image_urls
    ]
    blocks.append({
        "type": "text",
        "text": "Compare these images and describe the differences.",
    })
    message = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": blocks}],
    )
    return message.content[0].text
PDF Analysis
import base64
def analyze_pdf(pdf_path: str, prompt: str) -> str:
    """Send a local PDF (inline base64) to Claude along with *prompt*."""
    with open(pdf_path, "rb") as f:
        pdf_data = base64.standard_b64encode(f.read()).decode("utf-8")
    # A "document" block carries the PDF; the text block carries the question.
    document_block = {
        "type": "document",
        "source": {
            "type": "base64",
            "media_type": "application/pdf",
            "data": pdf_data,
        },
    }
    question_block = {"type": "text", "text": prompt}
    message = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=4096,
        messages=[{"role": "user", "content": [document_block, question_block]}],
    )
    return message.content[0].text
# Usage
# Uses a larger max_tokens budget (4096) since summaries can run long.
summary = analyze_pdf("report.pdf", "Summarize the key findings in this document.")
Error Handling
from anthropic import (
APIError,
RateLimitError,
APIConnectionError,
AuthenticationError
)
def safe_api_call(prompt: str, max_retries: int = 3) -> str:
    """Call the Messages API with retries for transient failures.

    Rate limits back off exponentially (1s, 2s, 4s, ...); connection errors
    retry after 1s; authentication failures and other API errors are not
    retried. Raises after *max_retries* failed attempts.
    """
    import time  # stdlib; used for the backoff sleeps (missing from the original file)

    for attempt in range(max_retries):
        try:
            response = client.messages.create(
                model="claude-sonnet-4-20250514",
                max_tokens=1024,
                messages=[{"role": "user", "content": prompt}],
            )
            return response.content[0].text
        except RateLimitError:
            wait_time = 2 ** attempt  # exponential backoff
            print(f"Rate limited. Waiting {wait_time}s...")
            time.sleep(wait_time)
        except APIConnectionError:
            print("Connection error. Retrying...")
            time.sleep(1)
        except AuthenticationError:
            # Not retryable: a bad key will not start working.
            raise Exception("Invalid API key")
        except APIError as e:
            print(f"API error: {e}")
            raise
    raise Exception("Max retries exceeded")
Async Usage
import asyncio
from anthropic import AsyncAnthropic
# Async variant of the client; its messages.create() calls are awaited.
async_client = AsyncAnthropic()
async def async_chat(prompt: str) -> str:
    """Await a single completion for *prompt* and return its text."""
    reply = await async_client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}],
    )
    return reply.content[0].text


async def batch_requests(prompts: list[str]) -> list[str]:
    """Fan out one request per prompt concurrently; results keep input order."""
    return await asyncio.gather(*(async_chat(p) for p in prompts))
# Usage
async def main():
    """Demo: ask three questions concurrently and print trimmed answers."""
    questions = [
        "What is Python?",
        "What is JavaScript?",
        "What is Rust?",
    ]
    answers = await batch_requests(questions)
    for question, answer in zip(questions, answers):
        # Show only the first 100 characters of each answer.
        print(f"Q: {question}\nA: {answer[:100]}...\n")


asyncio.run(main())
Best Practices
Cost Tracking
class UsageTracker:
    """Accumulates token usage and an approximate dollar cost per request."""

    # Pricing per million tokens (approximate)
    PRICES = {
        "claude-sonnet-4-20250514": {"input": 3.0, "output": 15.0},
        "claude-3-5-haiku-20241022": {"input": 0.25, "output": 1.25},
    }

    def __init__(self):
        self.total_cost = 0   # running dollar total across all requests
        self.requests = []    # one usage/cost record per tracked response

    def track(self, response, model: str):
        """Record one API response's token usage; return its estimated cost."""
        usage = response.usage
        # Unknown models fall back to Sonnet pricing.
        rates = self.PRICES.get(model, self.PRICES["claude-sonnet-4-20250514"])
        cost = (
            (usage.input_tokens / 1_000_000) * rates["input"]
            + (usage.output_tokens / 1_000_000) * rates["output"]
        )
        self.total_cost += cost
        self.requests.append({
            "model": model,
            "input_tokens": usage.input_tokens,
            "output_tokens": usage.output_tokens,
            "cost": cost,
        })
        return cost
Summary
| Feature | Description |
|---|---|
| Messages | Text conversations |
| Streaming | Real-time responses |
| Tool Use | Function calling |
| Vision | Image analysis |
| PDF | Document analysis |
| Async | Concurrent requests |
Claude API provides powerful capabilities for building intelligent, helpful applications.
Advertisement
Moshiour Rahman
Software Architect & AI Engineer
Enterprise software architect with deep expertise in financial systems, distributed architecture, and AI-powered applications. Building large-scale systems at Fortune 500 companies. Specializing in LLM orchestration, multi-agent systems, and cloud-native solutions. I share battle-tested patterns from real enterprise projects.
Related Articles
Model Context Protocol (MCP): Build Custom AI Tool Integrations
Master MCP to connect Claude and other LLMs to external tools, databases, and APIs. Complete guide with Python and TypeScript examples for building MCP servers.
Python — Build Your First MCP Server in Python: Connect Claude to Any API
Learn to build a Model Context Protocol (MCP) server from scratch. Create a GitHub MCP server that lets Claude interact with repositories, issues, and pull requests.
Python — Prompt Engineering: Master the Art of AI Communication
Learn prompt engineering techniques for ChatGPT, Claude, and other LLMs. Master zero-shot, few-shot, chain-of-thought, and advanced prompting strategies.
Comments
Comments are powered by GitHub Discussions.
Configure Giscus at giscus.app to enable comments.