pip install -r requirements.txt

# Copy template
cp .env.template .env
# Edit .env and add:
# - GOOGLE_API_KEY (from https://makersuite.google.com/app/apikey)
# - PINECONE_API_KEY (from https://pinecone.io/)
# - PINECONE_ENVIRONMENT (e.g., us-east-1)

python main.py init

streamlit run app.py

- Upload documents (drag & drop or click)
- Click "Process Documents"
- Ask questions about your documents
- Get AI-powered answers with sources
streamlit run app.py

python main.py init

# Single file
python main.py process document.txt
# Directory
python main.py process documents/
# With namespace
python main.py process docs/ --namespace project-1

- Open http://localhost:8501
- Click "Upload documents"
- Select .txt, .pdf, or .docx files
- Click "Process Documents"
- Type your question in the chat
- Get answers with sources
from src.rag import RAGChain
chain = RAGChain()
result = chain.query("What is the main topic?")
print(result["answer"])
for doc in result["source_documents"]:
    print(f"Source: {doc.metadata['source']}")

Edit .env to customize:
# Chunking
CHUNK_SIZE=1000 # Larger = more context
CHUNK_OVERLAP=200 # Overlap for continuity
# Retrieval
RETRIEVAL_TOP_K=5 # More results = slower
# Logging
LOG_LEVEL=INFO # INFO, DEBUG, WARNING, ERROR
# Models
GOOGLE_MODEL_NAME=gemini-2.5-flash
EMBEDDING_MODEL=models/embedding-001

# Make sure .env exists with API keys
cp .env.template .env
# Edit .env with your keys

- Check PINECONE_API_KEY is correct
- Check PINECONE_ENVIRONMENT matches your Pinecone project
- Verify internet connection
- Check GOOGLE_API_KEY is correct
- Verify API key has Generative AI enabled
- README.md - Complete documentation
- DOCUMENTATION.md - Technical deep-dive
- PROJECT_SUMMARY.md - Implementation details
- ✅ Follow the 5-minute setup above
- 📤 Upload your documents
- 💬 Ask questions in the chat
- 📖 Check source documents for citations
- 🔧 Customize configuration as needed
Ready to go! Happy document analyzing! 🎉