
LangChain Tutorial: Build AI Applications with Python

Master LangChain for building LLM-powered applications. Learn chains, agents, memory, and RAG, and integrate with OpenAI, HuggingFace, and vector databases.

Moshiour Rahman

What is LangChain?

LangChain is a framework for developing applications powered by large language models (LLMs). It provides tools to chain together LLM calls, integrate with external data sources, and build sophisticated AI applications.

Why LangChain?

- Chains: Combine multiple LLM calls
- Agents: LLMs that can use tools
- Memory: Maintain conversation context
- RAG: Retrieval-augmented generation
- Integrations: 100+ LLMs and tools

Installation

pip install langchain langchain-openai langchain-community
pip install chromadb faiss-cpu  # Vector stores
pip install python-dotenv

Basic Setup

Environment Configuration

import os
from dotenv import load_dotenv

# Loads OPENAI_API_KEY (and any other variables) from a .env file
load_dotenv()

# Alternatively, set the key directly; avoid hardcoding keys in real projects
os.environ["OPENAI_API_KEY"] = "your-api-key"

Simple LLM Call

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4", temperature=0.7)

response = llm.invoke("What is machine learning in one sentence?")
print(response.content)
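
invoke() also accepts a list of messages, which is useful when you want an explicit system prompt; a minimal sketch:

from langchain_core.messages import HumanMessage, SystemMessage

response = llm.invoke([
    SystemMessage(content="You are a concise assistant."),
    HumanMessage(content="What is machine learning in one sentence?")
])
print(response.content)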

Prompt Templates

Basic Templates

from langchain.prompts import PromptTemplate

template = """You are a helpful assistant that explains {topic}
to a {audience} in simple terms.

Explain: {question}
"""

prompt = PromptTemplate(
    input_variables=["topic", "audience", "question"],
    template=template
)

formatted = prompt.format(
    topic="machine learning",
    audience="beginner",
    question="What is a neural network?"
)
print(formatted)

Chat Prompt Templates

from langchain.prompts import ChatPromptTemplate

chat_prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a {role} who helps with {specialty}."),
    ("human", "{question}")
])

messages = chat_prompt.format_messages(
    role="Python expert",
    specialty="debugging code",
    question="Why is my list comprehension not working?"
)
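
The formatted messages can be passed straight to a chat model (assuming the llm from the earlier setup):

response = llm.invoke(messages)
print(response.content)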

Chains

Simple Chain (LCEL)

from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Define components
prompt = ChatPromptTemplate.from_template(
    "Tell me a short joke about {topic}"
)
llm = ChatOpenAI(model="gpt-4")
output_parser = StrOutputParser()

# Create chain using LCEL (LangChain Expression Language)
chain = prompt | llm | output_parser

# Run chain
result = chain.invoke({"topic": "programming"})
print(result)
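
The | operator composes runnables left to right; the chain above is equivalent to invoking each step by hand:

# Equivalent to chain.invoke({"topic": "programming"})
messages = prompt.invoke({"topic": "programming"})
ai_message = llm.invoke(messages)
result = output_parser.invoke(ai_message)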

Sequential Chain

from langchain.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser

llm = ChatOpenAI(model="gpt-4")

# First chain: Generate a topic
topic_prompt = ChatPromptTemplate.from_template(
    "Generate a creative blog topic about {subject}"
)
topic_chain = topic_prompt | llm | StrOutputParser()

# Second chain: Write outline
outline_prompt = ChatPromptTemplate.from_template(
    "Write a blog outline for: {topic}"
)
outline_chain = outline_prompt | llm | StrOutputParser()

# Combine chains: the topic chain's output feeds the outline chain
full_chain = {"topic": topic_chain} | outline_chain

result = full_chain.invoke({"subject": "AI in healthcare"})
print(result)

Output Parsers

Structured Output

from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
from typing import List

llm = ChatOpenAI(model="gpt-4")

class Recipe(BaseModel):
    name: str = Field(description="Name of the recipe")
    ingredients: List[str] = Field(description="List of ingredients")
    steps: List[str] = Field(description="Cooking steps")
    prep_time: int = Field(description="Preparation time in minutes")

parser = PydanticOutputParser(pydantic_object=Recipe)

prompt = ChatPromptTemplate.from_messages([
    ("system", "Extract recipe information.\n{format_instructions}"),
    ("human", "{query}")
])

chain = prompt | llm | parser

result = chain.invoke({
    "query": "How do I make spaghetti carbonara?",
    "format_instructions": parser.get_format_instructions()
})

print(f"Recipe: {result.name}")
print(f"Prep time: {result.prep_time} minutes")

Memory

Conversation Buffer Memory

from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4")
memory = ConversationBufferMemory()

conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)

# First message
response1 = conversation.predict(input="Hi, my name is John")
print(response1)

# Second message - remembers context
response2 = conversation.predict(input="What's my name?")
print(response2)  # Will remember "John"

Conversation Summary Memory

from langchain.memory import ConversationSummaryMemory

memory = ConversationSummaryMemory(llm=llm)

conversation = ConversationChain(
    llm=llm,
    memory=memory
)

# Long conversations get summarized
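
A quick sketch of what this looks like in practice; the exact summary text depends on the model:

conversation.predict(input="Tell me about the history of Python")
conversation.predict(input="Who created it?")

# The memory now holds a running summary, not the raw transcript
print(memory.load_memory_variables({}))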

Window Memory

from langchain.memory import ConversationBufferWindowMemory

# Only keep last 5 exchanges
memory = ConversationBufferWindowMemory(k=5)
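
You can verify the windowing behavior directly: after more than k exchanges, the oldest ones no longer appear in what the memory returns.

memory.save_context({"input": "Hi"}, {"output": "Hello!"})
# ... after more than 5 exchanges, only the latest 5 remain
print(memory.load_memory_variables({}))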

RAG (Retrieval-Augmented Generation)
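
A RAG pipeline has four stages: load documents, split them into chunks, embed the chunks into a vector store, then at query time retrieve the most relevant chunks and feed them to the LLM as context.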

Document Loading

from langchain_community.document_loaders import (
    TextLoader,
    PyPDFLoader,
    WebBaseLoader
)

# Load text file
text_loader = TextLoader("document.txt")
text_docs = text_loader.load()

# Load PDF
pdf_loader = PyPDFLoader("document.pdf")
pdf_docs = pdf_loader.load()

# Load web page
web_loader = WebBaseLoader("https://example.com/article")
web_docs = web_loader.load()

Text Splitting

from langchain.text_splitter import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
    length_function=len
)

# 'documents' is any list of Document objects, e.g. pdf_docs loaded above
splits = text_splitter.split_documents(documents)
print(f"Created {len(splits)} chunks")

Vector Store

from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma

embeddings = OpenAIEmbeddings()

# Create vector store
vectorstore = Chroma.from_documents(
    documents=splits,
    embedding=embeddings,
    persist_directory="./chroma_db"
)

# Search similar documents
results = vectorstore.similarity_search("What is machine learning?", k=3)
for doc in results:
    print(doc.page_content[:200])
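
The installation step also pulled in faiss-cpu; the same flow works with FAISS as an in-memory alternative. A minimal sketch, with an arbitrary index directory name:

from langchain_community.vectorstores import FAISS

faiss_store = FAISS.from_documents(documents=splits, embedding=embeddings)

# Persist to disk and reload later; recent versions require the
# allow_dangerous_deserialization flag for pickle-backed indexes
faiss_store.save_local("faiss_index")
faiss_store = FAISS.load_local(
    "faiss_index", embeddings, allow_dangerous_deserialization=True
)

results = faiss_store.similarity_search("What is machine learning?", k=3)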

Complete RAG Chain

from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser

# Setup
embeddings = OpenAIEmbeddings()
vectorstore = Chroma(persist_directory="./chroma_db", embedding_function=embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
llm = ChatOpenAI(model="gpt-4")

# RAG prompt
template = """Answer the question based on the following context:

Context: {context}

Question: {question}

Answer:"""

prompt = ChatPromptTemplate.from_template(template)

def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

# RAG chain
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

# Query
answer = rag_chain.invoke("What are the benefits of machine learning?")
print(answer)
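
Note how the input is routed: the raw question string goes to the retriever (whose hits format_docs joins into one string) and, via RunnablePassthrough, straight into the prompt's {question} slot.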

Agents

ReAct Agent with Tools

from langchain_openai import ChatOpenAI
from langchain.agents import create_react_agent, AgentExecutor
from langchain.tools import Tool
from langchain import hub

llm = ChatOpenAI(model="gpt-4", temperature=0)

# Define tools
def search_wikipedia(query: str) -> str:
    """Search Wikipedia for information."""
    # Implement actual search
    return f"Wikipedia results for: {query}"

def calculate(expression: str) -> str:
    """Calculate mathematical expressions."""
    # eval is unsafe on untrusted input; acceptable for a demo only
    try:
        return str(eval(expression))
    except Exception:
        return "Invalid expression"

tools = [
    Tool(
        name="Wikipedia",
        func=search_wikipedia,
        description="Search Wikipedia for factual information"
    ),
    Tool(
        name="Calculator",
        func=calculate,
        description="Perform mathematical calculations"
    )
]

# Create agent
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# Run agent
result = agent_executor.invoke({
    "input": "What is the population of France and what is that number squared?"
})
print(result["output"])

Custom Tools

from langchain.tools import tool
from langchain.pydantic_v1 import BaseModel, Field

class SearchInput(BaseModel):
    query: str = Field(description="Search query")
    max_results: int = Field(default=5, description="Maximum results")

@tool(args_schema=SearchInput)
def custom_search(query: str, max_results: int = 5) -> str:
    """Search the web for current information."""
    # Implement search logic
    return f"Found {max_results} results for: {query}"

tools = [custom_search]
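
Tools defined with @tool are runnables themselves, so you can sanity-check one directly before wiring it into an agent:

print(custom_search.invoke({"query": "LangChain releases", "max_results": 3}))
# Found 3 results for: LangChain releases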

Streaming

Stream Responses

from langchain_openai import ChatOpenAI
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

llm = ChatOpenAI(
    model="gpt-4",
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()]
)

# Streams tokens as they're generated
response = llm.invoke("Write a short story about AI")
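
The callback handler prints tokens as a side effect; the runnable interface also exposes a synchronous stream() method that yields chunks you can handle yourself:

llm = ChatOpenAI(model="gpt-4")

for chunk in llm.stream("Write a short story about AI"):
    print(chunk.content, end="", flush=True)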

Async Streaming

import asyncio
from langchain_openai import ChatOpenAI

async def stream_response():
    llm = ChatOpenAI(model="gpt-4")

    async for chunk in llm.astream("Explain quantum computing"):
        print(chunk.content, end="", flush=True)

asyncio.run(stream_response())

Building a Chatbot

from langchain_openai import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough

llm = ChatOpenAI(model="gpt-4")
memory = ConversationBufferMemory(return_messages=True)

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful AI assistant named Claude."),
    MessagesPlaceholder(variable_name="history"),
    ("human", "{input}")
])

def get_history(input_dict):
    return memory.load_memory_variables({})["history"]

chain = (
    RunnablePassthrough.assign(history=get_history)
    | prompt
    | llm
)

def chat(user_input: str) -> str:
    response = chain.invoke({"input": user_input})
    memory.save_context({"input": user_input}, {"output": response.content})
    return response.content

# Chat loop
while True:
    user_input = input("You: ")
    if user_input.lower() == "quit":
        break
    response = chat(user_input)
    print(f"AI: {response}")

Summary

- Prompts: Template LLM inputs
- Chains: Combine multiple operations
- Memory: Maintain conversation state
- RAG: Add external knowledge
- Agents: LLMs that use tools

LangChain makes it easy to build sophisticated AI applications by combining LLMs with tools, memory, and external data.

Moshiour Rahman
Software Architect & AI Engineer

Enterprise software architect with deep expertise in financial systems, distributed architecture, and AI-powered applications. Building large-scale systems at Fortune 500 companies. Specializing in LLM orchestration, multi-agent systems, and cloud-native solutions. I share battle-tested patterns from real enterprise projects.