Skip to content

Commit 10b7326

Browse files
committed
WIP - Streamlit UI, Porting CLI
1 parent bf83ff7 commit 10b7326

File tree

9 files changed

+368
-79
lines changed

9 files changed

+368
-79
lines changed

README.md

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,10 @@
1-
# How to Use AI Content Generation Toolkit - Alwrity
1+
# How to Alwrity - Getting Started
22

3+
Alwrity assists content creators and digital marketers in generating, formatting, and uploading blog content with unprecedented efficiency.
4+
5+
Our toolkit integrates advanced AI models for text generation, image creation, and data analysis, streamlining your content creation pipeline and ensuring high-quality output with minimal effort.
6+
7+
---
38
1). [Visit alwrity.com](https://www.alwrity.com/ai-writing-tools), You will find AI content writing tools, which are Free & No-Signup.
49
**Note:** Although, this is limited, as is our wallet & Resources.
510

alwrity_streamlit.py

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,13 @@
66
# Load .env file
77
load_dotenv()
88

9+
from lib.chatbot_custom.chatbot_local_docqa import alwrity_chat_docqa
910
from lib.utils.alwrity_streamlit_utils import (
1011
blog_from_keyword, ai_agents_team,
1112
blog_from_audio, write_story,
1213
essay_writer, ai_news_writer,
13-
ai_finance_ta_writer, ai_social_writer
14+
ai_finance_ta_writer, ai_social_writer,
15+
do_web_research, competitor_analysis,
1416
)
1517

1618
# Custom CSS for styling
@@ -291,8 +293,10 @@ def main():
291293
alwrity_brain()
292294

293295
with tab5:
294-
st.title("🙎 Ask Alwrity 🤦")
295-
st.write("Oh, you decided to talk to a chatbot? I guess even Netflix can't... Shall we get this over with?")
296+
st.info("Chatbot")
297+
st.markdown("Create a collection by uploading files (PDF, MD, CSV, etc), or crawl a data source (Websites, more sources coming soon.")
298+
st.markdown("One can ask/chat, summarize and do semantic search over the uploaded data")
299+
#alwrity_chat_docqa()
296300

297301
# Sidebar for prompt modification
298302
st.sidebar.title("📝 Modify Prompts")
@@ -350,19 +354,17 @@ def content_planning_tools():
350354
Provide few keywords to get Google, Neural, pytrends analysis. Know keywords, blog titles to target.
351355
Generate months long content calender around given keywords.""")
352356
options = [
353-
"Keywords web research🤓",
354-
"Competitor Analysis🧐",
355-
"Give me content calendar 🥹🥹"
357+
"Keywords Researcher",
358+
"Competitor Analysis"
356359
]
357360
choice = st.selectbox("Select a content planning tool:", options, index=0, format_func=lambda x: f"🔍 {x}")
358361

359-
if st.button("Plan Content"):
360-
if choice == "Keywords web research🤓":
361-
do_web_research()
362-
elif choice == "Competitor Analysis🧐":
363-
competitor_analysis()
364-
elif choice == "Give me content calendar 🥹🥹":
365-
content_planning_agents()
362+
if choice == "Keywords Researcher":
363+
do_web_research()
364+
elif choice == "Competitor Analysis":
365+
competitor_analysis()
366+
#elif choice == "Get Content Calender":
367+
# planning_agents()
366368

367369

368370
def alwrity_brain():

lib/ai_web_researcher/google_trends_researcher.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,8 @@ def get_related_topics_and_save_csv(search_keywords):
187187
data = pytrends.related_topics()
188188
except Exception as err:
189189
logger.error(f"Failed to get pytrends realted topics: {err}")
190-
return
190+
return None
191+
191192
# Extract data from the result
192193
top_topics = list(data.values())[0]['top']
193194
rising_topics = list(data.values())[0]['rising']

lib/ai_web_researcher/metaphor_basic_neural_web_search.py

Lines changed: 39 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
from metaphor_python import Metaphor
88
from datetime import datetime, timedelta
99

10+
import streamlit as st
1011
from loguru import logger
1112
from tqdm import tqdm
1213
from tabulate import tabulate
@@ -71,29 +72,46 @@ def metaphor_find_similar(similar_url):
7172
raise
7273

7374
competitors = search_response.results
74-
urls = {}
75+
# Initialize lists to store titles and URLs
76+
titles = []
77+
urls = []
78+
79+
# Initialize lists to store titles, URLs, and contents
80+
titles = []
81+
urls = []
82+
contents = []
83+
84+
# Extract titles, URLs, and contents from the competitors
7585
for c in competitors:
76-
print(c.title + ':' + c.url)
77-
for acompetitor in tqdm(competitors, desc="Processing URL content", unit="competitor"):
86+
titles.append(c.title)
87+
urls.append(c.url)
88+
# Simulate web content fetching and summarization (replace with actual logic)
7889
all_contents = ""
7990
try:
8091
search_response = metaphor.search_and_contents(
81-
acompetitor.url,
92+
c.url,
8293
type="keyword",
83-
num_results=3
94+
num_results=1
8495
)
96+
research_response = search_response.results
97+
for r in research_response:
98+
all_contents += r.text
99+
c.text = summarize_competitor_content(all_contents) # Replace with actual summarization function
85100
except Exception as err:
86-
logger.error(f"Failed to do metaphor keyword/url research: {err}")
87-
88-
research_response = search_response.results
89-
# Add a progress bar for the inner loop
90-
for r in tqdm(research_response, desc=f"{acompetitor.url}", unit="research"):
91-
all_contents += r.text
92-
try:
93-
acompetitor.text = summarize_competitor_content(all_contents)
94-
except Exception as err:
95-
logger.error(f"Failed to summarize_web_content: {err}")
101+
c.text = f"Failed to summarize content: {err}"
102+
contents.append(c.text)
96103

104+
# Create a DataFrame from the titles, URLs, and contents
105+
df = pd.DataFrame({
106+
"Title": titles,
107+
"URL": urls,
108+
"Content Summary": contents
109+
})
110+
# Display the DataFrame as a table
111+
if not df.empty:
112+
st.write("### Competitor Analysis Results")
113+
st.table(df)
114+
97115
print_search_result(competitors)
98116
return search_response
99117

@@ -179,6 +197,12 @@ def print_search_result(contents_response):
179197
tablefmt="fancy_grid",
180198
colalign=["left", "left", "left"],
181199
maxcolwidths=[20, 20, 70])
200+
201+
# Convert table_data to DataFrame
202+
import pandas as pd
203+
df = pd.DataFrame(table_data, columns=["URL", "Title", "Summary"])
204+
import streamlit as st
205+
st.table(df)
182206
print(table)
183207
# Save the combined table to a file
184208
try:
Lines changed: 104 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,104 @@
import os
import time

import joblib
import streamlit as st
import google.generativeai as genai
from dotenv import load_dotenv

# Pull environment variables from a local .env file, if present.
load_dotenv()
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
# NOTE(review): GOOGLE_API_KEY is read but never used; the client below is
# configured from GEMINI_API_KEY instead — confirm which variable is intended.
genai.configure(api_key=os.environ.get('GEMINI_API_KEY'))

# Role name recorded for AI-authored chat messages.
MODEL_ROLE = 'ai'
# Avatar rendered next to AI responses in the chat UI.
AI_AVATAR_ICON = '👄'
# Directory where chat sessions are persisted via joblib.
DATA_DIR = 'data/'
def history_chatbot():
    """Render a Gemini-backed chat page with persistent, selectable history.

    Past sessions are stored on disk under ``DATA_DIR`` with joblib; the
    sidebar lets the user resume one or start a new session.
    """
    # Make sure the persistence directory exists before any load/dump.
    os.makedirs(DATA_DIR, exist_ok=True)

    # A fresh timestamp doubles as the id of a brand-new session.
    new_chat_id = f'{time.time()}'

    # Index of previously saved sessions: {chat_id: title}.
    try:
        past_chats = joblib.load(os.path.join(DATA_DIR, 'past_chats_list'))
    except FileNotFoundError:
        past_chats = {}

    # Sidebar: pick an existing session or the new one.
    with st.sidebar:
        st.write('# Past Chats')
        if 'chat_id' not in st.session_state:
            st.session_state.chat_id = st.selectbox(
                label='Pick a past chat',
                options=[new_chat_id] + list(past_chats.keys()),
                format_func=lambda x: past_chats.get(x, 'New Chat'),
                placeholder='_',
            )
        else:
            # Keep the currently selected session pinned at index 1 so the
            # selectbox stays on it across Streamlit reruns.
            st.session_state.chat_id = st.selectbox(
                label='Pick a past chat',
                options=[new_chat_id, st.session_state.chat_id] + list(past_chats.keys()),
                index=1,
                format_func=lambda x: past_chats.get(
                    x,
                    'New Chat' if x != st.session_state.chat_id else st.session_state.chat_title,
                ),
                placeholder='_',
            )
        st.session_state.chat_title = f'ChatSession-{st.session_state.chat_id}'

    # Restore the selected session's transcript, or start empty.
    try:
        st.session_state.messages = joblib.load(
            os.path.join(DATA_DIR, f'{st.session_state.chat_id}-st_messages'))
        st.session_state.gemini_history = joblib.load(
            os.path.join(DATA_DIR, f'{st.session_state.chat_id}-gemini_messages'))
        print('Loaded existing chat history')
    except FileNotFoundError:
        st.session_state.messages = []
        st.session_state.gemini_history = []
        print('Initialized new chat history')

    # Rebuild the model conversation from the restored Gemini history.
    st.session_state.model = genai.GenerativeModel('gemini-pro')
    st.session_state.chat = st.session_state.model.start_chat(
        history=st.session_state.gemini_history)

    # Replay the transcript so the page shows the full conversation.
    for past_message in st.session_state.messages:
        with st.chat_message(name=past_message['role'], avatar=past_message.get('avatar')):
            st.markdown(past_message['content'])

    # Handle a new user turn, if any.
    if prompt := st.chat_input('Ask Alwrity...'):
        # First message of a session: register it in the saved-sessions index.
        if st.session_state.chat_id not in past_chats:
            past_chats[st.session_state.chat_id] = st.session_state.chat_title
            joblib.dump(past_chats, os.path.join(DATA_DIR, 'past_chats_list'))

        # Echo and record the user's message.
        with st.chat_message('user'):
            st.markdown(prompt)
        st.session_state.messages.append({'role': 'user', 'content': prompt})

        # Stream the model's reply word by word with a cursor glyph.
        response = st.session_state.chat.send_message(prompt, stream=True)
        full_response = ''
        with st.chat_message(name=MODEL_ROLE, avatar=AI_AVATAR_ICON):
            message_placeholder = st.empty()
            for chunk in response:
                for word in chunk.text.split(' '):
                    full_response += word + ' '
                    time.sleep(0.05)
                    message_placeholder.write(full_response + '▌')
            message_placeholder.write(full_response)

        # Record the AI reply and persist both transcripts to disk.
        st.session_state.messages.append({
            'role': MODEL_ROLE,
            'content': full_response,
            'avatar': AI_AVATAR_ICON,
        })
        st.session_state.gemini_history = st.session_state.chat.history
        joblib.dump(st.session_state.messages,
                    os.path.join(DATA_DIR, f'{st.session_state.chat_id}-st_messages'))
        joblib.dump(st.session_state.gemini_history,
                    os.path.join(DATA_DIR, f'{st.session_state.chat_id}-gemini_messages'))
Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,101 @@
import os
from pathlib import Path

import openai
import streamlit as st
from dotenv import load_dotenv
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, Document
from llama_index.llms.openai import OpenAI

# Read API credentials from the repository-level .env file.
# NOTE(review): the path is relative to the process working directory, not
# this module's location — confirm it resolves however the app is launched.
load_dotenv(Path("../../.env"))
openai.api_key = os.getenv("OPENAI_API_KEY")
def initialize_session_state():
    """Seed the chat message history in Streamlit session state.

    On the first run, creates ``st.session_state.messages`` with a single
    assistant greeting; subsequent reruns leave the existing history intact.
    """
    if "messages" not in st.session_state:
        # Bug fix: the greeting previously interpolated an undefined name
        # (LOCAL_BRAIN_DATA), which raised NameError on first render.
        # Use a static greeting instead.
        st.session_state.messages = [
            {
                "role": "assistant",
                "content": "Ask me a question about your documents or the Web.",
            }
        ]
@st.cache_resource(show_spinner=False)
def load_data(input_dir):
    """Load and index all documents found under *input_dir*.

    Recursively reads the directory with ``SimpleDirectoryReader``, then
    builds a ``VectorStoreIndex`` whose queries are answered by
    gpt-3.5-turbo. Cached by Streamlit, so reruns with the same path skip
    re-indexing.

    Args:
        input_dir: Path to the folder containing the documents to index.

    Returns:
        The populated ``VectorStoreIndex``.
    """
    with st.spinner("Loading and indexing your docs – hang tight! This should take 1-2 minutes."):
        reader = SimpleDirectoryReader(input_dir=input_dir, recursive=True)
        docs = reader.load_data()
        service_context = ServiceContext.from_defaults(
            llm=OpenAI(
                model="gpt-3.5-turbo",
                temperature=0.5,
                # Bug fix: the adjacent string literals were concatenated
                # with no separator, fusing sentences in the prompt
                # ("...questions.Assume that all..."). Trailing spaces added.
                system_prompt=(
                    "You are an expert on content & digital marketing and your job is to answer technical questions. "
                    "Assume that all questions are related to provided documents, as context. "
                    "Keep your answers technical and based on facts – do not hallucinate features."
                )
            )
        )
        index = VectorStoreIndex.from_documents(docs, service_context=service_context)
        return index
def display_chat_history():
    """Render every stored chat message, oldest first."""
    for entry in st.session_state.messages:
        with st.chat_message(entry["role"]):
            st.write(entry["content"])
def generate_response(prompt, chat_engine):
    """Answer *prompt* via *chat_engine*, recording both sides of the turn.

    Does nothing when *prompt* is empty (e.g. no input submitted yet).
    """
    if not prompt:
        return

    # Record the user's turn before generating the reply.
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            reply = chat_engine.chat(prompt)
            st.write(reply.response)
            st.session_state.messages.append(
                {"role": "assistant", "content": reply.response})
def alwrity_chat_docqa():
63+
"""Main function to run the Streamlit app."""
64+
st.header("Ask Alwrity 💬 📚")
65+
initialize_session_state()
66+
option = st.radio(
67+
"Choose Data Source To Ask From:",
68+
("Ask Your Local Docs", "Ask Your PDFs", "Ask Your Videos", "Ask Your Audio Files")
69+
)
70+
71+
if option == "Ask Your Local Docs":
72+
input_dir = st.text_input("Enter the path to the folder:")
73+
if input_dir:
74+
st.session_state.input_dir = input_dir
75+
76+
elif option == "Ask Your PDFs":
77+
pdf_file = st.file_uploader("Upload a PDF file or enter a URL:", type=["pdf"])
78+
if pdf_file:
79+
st.session_state.input_file = pdf_file
80+
81+
elif option == "Ask Your Videos":
82+
video_dir = st.text_input("Enter the path to the video folder:")
83+
if video_dir:
84+
st.session_state.input_dir = video_dir
85+
86+
elif option == "Ask Your Audio Files":
87+
audio_dir = st.text_input("Enter the path to the audio folder:")
88+
if audio_dir:
89+
st.session_state.input_dir = audio_dir
90+
91+
if 'input_dir' in st.session_state:
92+
index = load_data(st.session_state.input_dir)
93+
chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
94+
display_chat_history()
95+
prompt = st.chat_input("Your question")
96+
if st.session_state.messages[-1]["role"] != "assistant":
97+
generate_response(prompt, chat_engine)
98+
99+
elif 'input_file' in st.session_state:
100+
# Handle PDF file or URL input here
101+
st.write("Handling PDF file or URL input is not implemented yet.")

0 commit comments

Comments
 (0)