-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp_YouTubeTextGenerator.py
More file actions
76 lines (57 loc) · 2.67 KB
/
app_YouTubeTextGenerator.py
File metadata and controls
76 lines (57 loc) · 2.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
"""Streamlit app: generate a YouTube video title, script, and bibliography.

Given a user-supplied topic prompt, the app runs three independent LangChain
LLM chains (title, script, bibliography links) plus a Wikipedia lookup, and
renders the results with per-chain conversation history in expanders.
"""
import os

import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain
from langchain.memory import ConversationBufferMemory
from langchain.utilities import WikipediaAPIWrapper

# SECURITY: never commit a real API key to source control. This placeholder
# is only a fallback — setdefault() means an OPENAI_API_KEY already exported
# in the environment is NOT clobbered by the placeholder.
apikey = 'INSERTAPIKEYHERE'
os.environ.setdefault('OPENAI_API_KEY', apikey)

# --- Prompt templates -------------------------------------------------------
# Each template's input_variables must match the kwargs its chain is run with.
title_template = PromptTemplate(
    input_variables=['topic'],
    template='write me a title for this {topic}.'
)

script_template = PromptTemplate(
    input_variables=['title', 'wikipedia_research'],
    template='write me a script based on the title. TITLE:{title} while leveraging this wikipedia research:{wikipedia_research}'
)

bibliography_links_template = PromptTemplate(
    input_variables=['topic'],
    template='provide me links to relevant knowledge sources about {topic}.'
)

# --- Conversation memory ----------------------------------------------------
# Separate buffers so title and script histories can be shown independently.
title_memory = ConversationBufferMemory(input_key='topic', memory_key='chat_history')
script_memory = ConversationBufferMemory(input_key='title', memory_key='chat_history')

# --- App framework ----------------------------------------------------------
st.title('🦜🔗 GPT YouTube Video Script & Info Gatherer')
prompt = st.text_input('Plug in your prompt here')

# --- LLM chains -------------------------------------------------------------
llm = OpenAI(temperature=0.9)
title_chain = LLMChain(llm=llm, prompt=title_template, verbose=True, output_key='title', memory=title_memory)
script_chain = LLMChain(llm=llm, prompt=script_template, verbose=True, output_key='script', memory=script_memory)
bibliography_links_chain = LLMChain(llm=llm, prompt=bibliography_links_template, verbose=True, output_key='bibliography')

wiki = WikipediaAPIWrapper()

# NOTE: the chains are run independently below, so a SequentialChain wiring
# title -> script -> bibliography is no longer needed.

if prompt:
    # 1. Generate a title for the topic.
    title = title_chain.run(prompt)
    # 2. Pull Wikipedia research on the topic.
    wiki_research = wiki.run(prompt)
    # 3. Generate a script from the title plus the Wikipedia research.
    script = script_chain.run(title=title, wikipedia_research=wiki_research)
    # 4. Collect links to relevant knowledge sources.
    bibliography = bibliography_links_chain.run(topic=prompt)

    st.write(title)
    st.write(script)
    st.write(bibliography)

    with st.expander('Message History'):
        st.info(title_memory.buffer)
    with st.expander('Script History'):
        st.info(script_memory.buffer)
    with st.expander('Wikipedia Research'):
        st.info(wiki_research)
else:
    st.warning("Please enter a prompt.")