Commit f94982a4 authored by Allen Bose(UST's avatar Allen Bose(UST

stock market consulting ai project

parents
import os

import google.generativeai as genai
import yfinance as yf
from dotenv import load_dotenv

# Load environment variables from .env so the API key never lives in source.
load_dotenv()

# SECURITY: the original code committed a literal API key to the repo AND
# passed the string "gemini_api_key" (not a key) to configure(), so the
# call could never authenticate. Read the real key from the environment,
# matching how app.py configures the SDK.
genai.configure(api_key=os.getenv("gemini_api_key"))
def analyze_stock(symbol):
    """Summarize the one-month price trend for *symbol* using Gemini.

    Fetches a month of daily history from Yahoo Finance, derives the
    average/last close and trend direction, and returns a short LLM-written
    analysis. Returns an error string when no data exists for the ticker.
    """
    history = yf.Ticker(symbol).history(period="1mo")
    if history.empty:
        return f"No data found for symbol '{symbol}'. Please check the ticker."

    closes = history['Close']
    mean_close = closes.mean()
    latest_close = closes.iloc[-1]
    direction = "upward 📈" if latest_close > mean_close else "downward 📉"

    # LLM enhancement: turn the computed statistics into a short narrative.
    prompt = f"""
Provide a short stock market analysis for {symbol} based on this info:
- 1 month average closing price: {mean_close:.2f}
- Last closing price: {latest_close:.2f}
- Trend: {direction}
Keep it concise and professional.
"""
    model = genai.GenerativeModel("gemini-2.5-flash")
    return model.generate_content(prompt).text
def risk_assessment(symbol):
    """Assess investment risk for *symbol* from six months of price history.

    Computes annualized volatility from daily returns, buckets it into a
    risk level, and returns Gemini's plain-English explanation. Returns an
    error string when Yahoo Finance has no data for the ticker.
    """
    hist = yf.Ticker(symbol).history(period="6mo")
    if hist.empty:
        # Consistent with analyze_stock/compare_stocks: fail gracefully on
        # unknown tickers instead of computing NaN volatility from an empty
        # series (which previously mislabeled bad tickers as "Low ✅" risk).
        return f"No data found for symbol '{symbol}'. Please check the ticker."

    closes = hist['Close']
    # Annualize daily return volatility: std of daily pct changes * sqrt(252
    # trading days per year).
    volatility = closes.pct_change().std() * (252 ** 0.5)
    risk_level = "High ⚠️" if volatility > 0.3 else "Moderate 🟡" if volatility > 0.15 else "Low ✅"

    # LLM enhancement: explain the numeric risk figures to the investor.
    prompt = f"""
Explain the risk for {symbol} based on this data:
- 6-month volatility: {volatility:.2f}
- Risk level: {risk_level}
Provide practical guidance for an investor in plain English.
"""
    model = genai.GenerativeModel("gemini-2.5-flash")
    return model.generate_content(prompt).text
def compare_stocks(symbol1, symbol2):
    """Compare one-month performance of two tickers and ask Gemini to judge.

    Returns the LLM's comparison text, or an error string naming every
    ticker for which Yahoo Finance returned no data.
    """
    hist1 = yf.Ticker(symbol1).history(period="1mo")
    hist2 = yf.Ticker(symbol2).history(period="1mo")

    # Report every ticker that came back empty, not just the first one.
    missing = [sym for sym, hist in ((symbol1, hist1), (symbol2, hist2)) if hist.empty]
    if missing:
        return f"No data found for {', '.join(missing)}. Please check the ticker(s)."

    closes1, closes2 = hist1['Close'], hist2['Close']
    avg1, last1 = closes1.mean(), closes1.iloc[-1]
    avg2, last2 = closes2.mean(), closes2.iloc[-1]
    # Percentage change from the first to the last close in the window.
    perf1 = (last1 - closes1.iloc[0]) / closes1.iloc[0] * 100
    perf2 = (last2 - closes2.iloc[0]) / closes2.iloc[0] * 100
    better = symbol1 if perf1 > perf2 else symbol2

    prompt = f"""
Compare two stocks: {symbol1} and {symbol2}.
- {symbol1}: Avg = {avg1:.2f}, Last = {last1:.2f}, 1-month change = {perf1:.2f}%
- {symbol2}: Avg = {avg2:.2f}, Last = {last2:.2f}, 1-month change = {perf2:.2f}%
Suggest which stock is performing better and why.
"""
    model = genai.GenerativeModel("gemini-2.5-flash")
    return model.generate_content(prompt).text
import streamlit as st
import google.generativeai as genai
from dotenv import load_dotenv
import os
from router import agent_router

# Load API key from .env (expects a `gemini_api_key` entry — same name the
# other modules use) and configure the Gemini SDK.
load_dotenv()
genai.configure(api_key=os.getenv("gemini_api_key"))

# Streamlit UI setup
st.set_page_config(page_title="Stock Market Consultant 💹", layout="wide")
st.title("💬 Stock Market Consultant")

# Initialize chat history; session_state survives Streamlit's per-interaction
# script reruns, so the conversation persists.
if "messages" not in st.session_state:
    st.session_state.messages = []

# --- Chat container: show messages ABOVE the input ---
chat_container = st.container()

# Display previous chat messages (top to bottom)
with chat_container:
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"]):
            st.markdown(msg["content"])

# --- Input area stays at the BOTTOM ---
st.markdown("---")  # separator line for clarity
with st.container():
    # clear_on_submit empties the text box after each send.
    with st.form(key="chat_form", clear_on_submit=True):
        user_input = st.text_input(
            "💬 Type your question about stocks or crypto:",
            placeholder="e.g., search for analysing,risk factors and comparing of stocks",
            label_visibility="collapsed"
        )
        submitted = st.form_submit_button("Send")

if submitted and user_input:
    # Display user message immediately, then persist it for future reruns.
    with st.chat_message("user"):
        st.markdown(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Get AI/agent response — agent_router dispatches to the matching agent.
    with st.chat_message("assistant"):
        with st.spinner("Analyzing market data..."):
            response_text = agent_router(user_input)
            st.markdown(response_text)

    # Save assistant message
    st.session_state.messages.append({"role": "assistant", "content": response_text})
# 💹 Agentic AI Stock Market Consultant
An AI-powered system that performs stock market trend, risk, and comparison analysis using **Gemini API** and **Agentic AI** principles.
## 🧠 Overview
This project demonstrates an intelligent, multi-agent stock consultant system that autonomously analyzes and compares stocks using financial data from Yahoo Finance and reasoning from Google Gemini LLM.
## ⚙️ Features
- 💬 Conversational chat interface using Streamlit
- 📈 Trend analysis for stock prices
- ⚠️ Risk assessment based on volatility
- 🧮 Stock performance comparison
- 🧠 LLM-driven decision-making (Gemini API)
## 🧩 System Architecture
User Query → Router Agent → Gemini Response → Specialized Stock Agents → Gemini Response → Streamlit Chat UI
## 🧠 Agentic AI Design
- Router Agent → decides which analysis tool to use
- Analysis Agent → analyzes price trends
- Risk Agent → calculates volatility
- Comparison Agent → compares two stocks
## 🧰 Tech Stack
- Python 3.10+
- Streamlit
- yFinance
- Google Gemini API
- dotenv
## 🚀 Setup
1. Clone the repo
2. Create a `.env` file containing your Gemini API key:
   ```
   gemini_api_key=YOUR_API_KEY_HERE
   ```
3. Install dependencies:
   ```bash
   pip install -r requirements.txt
   ```
4. Run the app:
   ```bash
   streamlit run app.py
   ```
---

## 📁 Folder Structure

```text
project/
├── app.py                # Main Streamlit app
├── router.py             # Routes queries to the appropriate agent
├── agents.py             # Stock analysis, risk, and comparison functions
├── resolve_ticker.py     # LLM-based ticker symbol extraction
├── .env                  # Environment variables (API keys)
├── requirements.txt      # Python dependencies
└── README.md             # Project documentation
```
## 🧮 Example Queries
| User Query | Action Taken |
|-------------|--------------|
| "Analyze AAPL" | Trend analysis |
| "Risk TSLA" | Risk agent |
| "Compare NVDA and AMD" | Comparison agent |
## 📈 Future Enhancements
- Add sentiment analysis using financial news
- Implement portfolio optimization
- Add voice-based Gemini interaction
## 🏆 Conclusion
This project demonstrates an Agentic AI system that autonomously decides, reasons, and executes stock market analysis using Gemini API and custom agents.
## 👨‍💻 Author
Name: Allen Bose
Year: 2025
import google.generativeai as genai


def resolve_tickers(query):
    """Extract Yahoo Finance ticker symbols mentioned in natural text.

    Example: "compare Tesla and Apple" → ["TSLA", "AAPL"]
    """
    prompt = f"""
Extract the official Yahoo Finance ticker symbols for the companies mentioned in this text:
"{query}"
Respond with only a comma-separated list of ticker symbols (like TSLA,AAPL,NVDA).
"""
    reply = genai.GenerativeModel("gemini-2.5-flash").generate_content(prompt)

    # Split the comma-separated reply, dropping blanks and normalizing case.
    symbols = []
    for piece in reply.text.split(","):
        piece = piece.strip()
        if piece:
            symbols.append(piece.upper())
    return symbols
import google.generativeai as genai


def detect_intent(query):
    """Classify *query* with Gemini into one of:
    'analyze', 'risk', 'compare', or 'general'.
    """
    prompt = f"""
You are a routing assistant in a financial analysis system.
Analyze the following user query and classify it into one of four categories:
- 'analyze' → if user wants to check stock performance, trend, or price movement
- 'risk' → if user asks about volatility, safety, or risk level
- 'compare' → if user compares two or more companies
- 'general' → if it’s general finance or unrelated
Query: "{query}"
Respond with only one word: analyze, risk, compare, or general.
"""
    reply = genai.GenerativeModel("gemini-2.5-flash").generate_content(prompt)
    # Normalize so downstream comparisons against the four labels are exact.
    return reply.text.strip().lower()
from agents import analyze_stock, risk_assessment, compare_stocks
from resolve_ticker import resolve_tickers, detect_intent
import google.generativeai as genai
def agent_router(query):
    """Route *query* to the matching analysis agent or a general LLM reply.

    Uses detect_intent/resolve_tickers (both LLM-backed) to pick an agent,
    then returns that agent's text. Returns a user-facing warning string
    when no ticker resolves, or when a comparison lacks a second ticker.
    """
    intent = detect_intent(query)
    tickers = resolve_tickers(query)

    # Every specialized agent needs at least one resolved ticker.
    if not tickers:
        return "⚠️ Couldn’t find any valid stock symbol. Please check your input."

    # Route based on intent
    if intent == "analyze":
        return analyze_stock(tickers[0])
    if intent == "risk":
        return risk_assessment(tickers[0])
    if intent == "compare":
        # Previously a compare request with one ticker silently fell through
        # to the general LLM branch; tell the user what is missing instead.
        if len(tickers) < 2:
            return "⚠️ Please mention two stocks to compare (e.g., 'compare NVDA and AMD')."
        return compare_stocks(tickers[0], tickers[1])

    # General finance questions: answer directly and concisely with Gemini.
    prompt = f"""
Respond concisely to the user's query in 2-3 sentences, only providing the essential info.
Query: {query}
"""
    model = genai.GenerativeModel("gemini-2.5-flash")
    return model.generate_content(prompt).text
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment