Skip to content

Commit 45bc33f

Browse files
Sam Witteveen
authored and committed
first commit
0 parents  commit 45bc33f

24 files changed

+59216
-0
lines changed

LangChain_Basics_01_LLMs_+_Prompting.ipynb

Lines changed: 482 additions & 0 deletions
Large diffs are not rendered by default.

YT_AutoGPT_Basics.ipynb

Lines changed: 919 additions & 0 deletions
Large diffs are not rendered by default.

YT_BabyAGI.ipynb

Lines changed: 996 additions & 0 deletions
Large diffs are not rendered by default.

YT_BabyAGI_Langchain_with_Tools.ipynb

Lines changed: 1286 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 396 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,396 @@
1+
{
2+
"nbformat": 4,
3+
"nbformat_minor": 0,
4+
"metadata": {
5+
"colab": {
6+
"provenance": []
7+
},
8+
"kernelspec": {
9+
"name": "python3",
10+
"display_name": "Python 3"
11+
},
12+
"language_info": {
13+
"name": "python"
14+
}
15+
},
16+
"cells": [
17+
{
18+
"cell_type": "code",
19+
"execution_count": null,
20+
"metadata": {
21+
"id": "4ca2Z08vpqfJ"
22+
},
23+
"outputs": [],
24+
"source": [
25+
"!pip -q install openai langchain==0.0.99rc0"
26+
]
27+
},
28+
{
29+
"cell_type": "markdown",
30+
"source": [
31+
"## Basics with OpenAI API"
32+
],
33+
"metadata": {
34+
"id": "ne-Qg0YiqA75"
35+
}
36+
},
37+
{
38+
"cell_type": "code",
39+
"source": [
40+
"import os\n",
41+
"import openai\n",
42+
"\n",
43+
"openai.api_key =''\n",
44+
"os.environ['OPENAI_API_KEY'] = ''"
45+
],
46+
"metadata": {
47+
"id": "M5b0ALlsp8Eh"
48+
},
49+
"execution_count": null,
50+
"outputs": []
51+
},
52+
{
53+
"cell_type": "code",
54+
"source": [
55+
"response = openai.ChatCompletion.create(\n",
56+
" model=\"gpt-3.5-turbo\",\n",
57+
" messages=[\n",
58+
" {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
59+
" {\"role\": \"user\", \"content\": \"Hello what kind of assistant are you?\"},\n",
60+
" ]\n",
61+
")"
62+
],
63+
"metadata": {
64+
"id": "iYvl1FGPrMNn"
65+
},
66+
"execution_count": null,
67+
"outputs": []
68+
},
69+
{
70+
"cell_type": "code",
71+
"source": [
72+
"response"
73+
],
74+
"metadata": {
75+
"colab": {
76+
"base_uri": "https://localhost:8080/"
77+
},
78+
"id": "Ch7hAIq3rRLo",
79+
"outputId": "f9ec3e07-9eaf-4780-8ed0-fba0b2caeb9e"
80+
},
81+
"execution_count": null,
82+
"outputs": [
83+
{
84+
"output_type": "execute_result",
85+
"data": {
86+
"text/plain": [
87+
"<OpenAIObject chat.completion id=chatcmpl-6pakUtAnnDzKpBkh2QcxLAp1ymTtR at 0x7fdae98f9400> JSON: {\n",
88+
" \"choices\": [\n",
89+
" {\n",
90+
" \"finish_reason\": \"stop\",\n",
91+
" \"index\": 0,\n",
92+
" \"message\": {\n",
93+
" \"content\": \"I am a virtual assistant, equipped with AI technology to assist you with various tasks and answer your questions as best as I can. How may I assist you today?\",\n",
94+
" \"role\": \"assistant\"\n",
95+
" }\n",
96+
" }\n",
97+
" ],\n",
98+
" \"created\": 1677754010,\n",
99+
" \"id\": \"chatcmpl-6pakUtAnnDzKpBkh2QcxLAp1ymTtR\",\n",
100+
" \"model\": \"gpt-3.5-turbo-0301\",\n",
101+
" \"object\": \"chat.completion\",\n",
102+
" \"usage\": {\n",
103+
" \"completion_tokens\": 35,\n",
104+
" \"prompt_tokens\": 26,\n",
105+
" \"total_tokens\": 61\n",
106+
" }\n",
107+
"}"
108+
]
109+
},
110+
"metadata": {},
111+
"execution_count": 4
112+
}
113+
]
114+
},
115+
{
116+
"cell_type": "markdown",
117+
"source": [
118+
"### Chat Markup Language - token system\n",
119+
"\n",
120+
"```markdown\n",
121+
"<|im_start|>system\n",
122+
"You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\n",
123+
"Knowledge cutoff: 2021-09-01\n",
124+
"Current date: 2023-03-01<|im_end|>\n",
125+
"<|im_start|>user\n",
126+
"How are you<|im_end|>\n",
127+
"<|im_start|>assistant\n",
128+
"I am doing well!<|im_end|>\n",
129+
"<|im_start|>user\n",
130+
"How are you now?<|im_end|>\n",
131+
"```\n",
132+
"\n",
133+
"```\n",
134+
"import openai\n",
135+
"\n",
136+
"openai.ChatCompletion.create(\n",
137+
" model=\"gpt-3.5-turbo\",\n",
138+
" messages=[\n",
139+
" {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
140+
" {\"role\": \"user\", \"content\": \"Who won the world series in 2020?\"},\n",
141+
" {\"role\": \"assistant\", \"content\": \"The Los Angeles Dodgers won the World Series in 2020.\"},\n",
142+
" {\"role\": \"user\", \"content\": \"Where was it played?\"}\n",
143+
" ]\n",
144+
")\n",
145+
"```"
146+
],
147+
"metadata": {
148+
"id": "5PCc41QfsI21"
149+
}
150+
},
151+
{
152+
"cell_type": "code",
153+
"source": [
154+
"messages=[\n",
155+
" {\"role\": \"system\", \"content\": \"You are a helpful assistant named Kate.\"},\n",
156+
" {\"role\": \"user\", \"content\": \"Hello what kind of assistant are you?\"},\n",
157+
" ]"
158+
],
159+
"metadata": {
160+
"id": "2CTxNRyZrgdN"
161+
},
162+
"execution_count": null,
163+
"outputs": []
164+
},
165+
{
166+
"cell_type": "code",
167+
"source": [
168+
"conversation_total_tokens = 0\n",
169+
"\n",
170+
"while True:\n",
171+
" message = input(\"Human: \")\n",
172+
" if message=='exit':\n",
173+
" print(f\"{conversation_total_tokens} tokens used in total in this conversation\")\n",
174+
" break\n",
175+
" if message:\n",
176+
" messages.append(\n",
177+
" {\"role\": \"user\", \"content\": message},\n",
178+
" )\n",
179+
" response = openai.ChatCompletion.create(\n",
180+
" model=\"gpt-3.5-turbo\", messages=messages\n",
181+
" )\n",
182+
" \n",
183+
" reply = response.choices[0].message.content\n",
184+
" total_tokens = response.usage['total_tokens']\n",
185+
" conversation_total_tokens += total_tokens\n",
186+
" print(f\"ChatGPT: {reply} \\n {total_tokens} tokens used\")\n",
187+
" messages.append({\"role\": \"assistant\", \"content\": reply})"
188+
],
189+
"metadata": {
190+
"colab": {
191+
"base_uri": "https://localhost:8080/"
192+
},
193+
"id": "keYvaAHJuzef",
194+
"outputId": "8f3e3467-20ca-43ed-bd56-756ae48cd6ba"
195+
},
196+
"execution_count": null,
197+
"outputs": [
198+
{
199+
"output_type": "stream",
200+
"name": "stdout",
201+
"text": [
202+
"Human: When was Marcus Aurelius emperor of Rome?\n",
203+
"ChatGPT: Hello! I'm Kate, a virtual assistant here to help you. The Roman emperor Marcus Aurelius ruled from 161 AD to his death in 180 AD. He was the last of the Five Good Emperors of Rome. \n",
204+
" 91 tokens used\n",
205+
"Human: Who was his wife?\n",
206+
"ChatGPT: Marcus Aurelius' wife was named Faustina the Younger. She was also his first cousin and they were married in 145 AD. They had 13 children together, many of whom did not survive childhood. Faustina was known for her intelligence, beauty, and devotion to her husband. She was later deified after her death. \n",
207+
" 176 tokens used\n",
208+
"Human: how many children did they have?\n",
209+
"ChatGPT: Marcus Aurelius and Faustina the Younger had 14 children together, including 9 daughters and 5 sons. However, most of their children died at an early age, and only a few survived into adulthood. Their most famous surviving child was Annia Galeria Faustina, who became the wife of Marcus Aurelius' co-emperor and adopted brother, Lucius Verus. \n",
210+
" 273 tokens used\n",
211+
"Human: exit\n",
212+
"540 tokens used in total in this conversation\n"
213+
]
214+
}
215+
]
216+
},
217+
{
218+
"cell_type": "markdown",
219+
"source": [
220+
"## ChatGPT with LangChain"
221+
],
222+
"metadata": {
223+
"id": "uAGcCgZkxbmy"
224+
}
225+
},
226+
{
227+
"cell_type": "code",
228+
"source": [
229+
"!pip show langchain"
230+
],
231+
"metadata": {
232+
"colab": {
233+
"base_uri": "https://localhost:8080/"
234+
},
235+
"id": "xDQfHX6AwYea",
236+
"outputId": "13da4630-5237-4665-d800-a1b3cdfe38d0"
237+
},
238+
"execution_count": null,
239+
"outputs": [
240+
{
241+
"output_type": "stream",
242+
"name": "stdout",
243+
"text": [
244+
"Name: langchain\n",
245+
"Version: 0.0.99rc0\n",
246+
"Summary: Building applications with LLMs through composability\n",
247+
"Home-page: https://www.github.com/hwchase17/langchain\n",
248+
"Author: \n",
249+
"Author-email: \n",
250+
"License: MIT\n",
251+
"Location: /usr/local/lib/python3.8/dist-packages\n",
252+
"Requires: aiohttp, aleph-alpha-client, dataclasses-json, deeplake, numpy, pydantic, PyYAML, requests, SQLAlchemy, tenacity\n",
253+
"Required-by: \n"
254+
]
255+
}
256+
]
257+
},
258+
{
259+
"cell_type": "code",
260+
"source": [
261+
"from langchain import PromptTemplate, LLMChain\n",
262+
"from langchain.prompts import PromptTemplate\n",
263+
"from langchain.llms import OpenAI, OpenAIChat"
264+
],
265+
"metadata": {
266+
"id": "SZXeJzHGxgOw"
267+
},
268+
"execution_count": null,
269+
"outputs": []
270+
},
271+
{
272+
"cell_type": "code",
273+
"source": [
274+
"prefix_messages = [{\"role\": \"system\", \"content\": \"You are a helpful history professor named Kate.\"}]\n"
275+
],
276+
"metadata": {
277+
"id": "g-00Nk5704vL"
278+
},
279+
"execution_count": null,
280+
"outputs": []
281+
},
282+
{
283+
"cell_type": "code",
284+
"source": [
285+
"## old way\n",
286+
"# llm = OpenAI(model_name=\"text-davinci-003\",\n",
287+
"# temperature=0, )\n",
288+
"\n",
289+
"## New way\n",
290+
"llm = OpenAIChat(model_name='gpt-3.5-turbo', \n",
291+
" temperature=0, \n",
292+
" prefix_messages=prefix_messages,\n",
293+
" max_tokens = 256)"
294+
],
295+
"metadata": {
296+
"id": "ygQ3pfROxyhW"
297+
},
298+
"execution_count": null,
299+
"outputs": []
300+
},
301+
{
302+
"cell_type": "code",
303+
"source": [
304+
"\n",
305+
"template = \"\"\"Take the following question: {user_input}\n",
306+
"\n",
307+
"Answer it in an informative and intersting but conscise way for someone who is new to this topic.\"\"\"\n",
308+
"\n",
309+
"prompt = PromptTemplate(template=template, \n",
310+
" input_variables=[\"user_input\"])\n"
311+
],
312+
"metadata": {
313+
"id": "lkX7ybjFFHkn"
314+
},
315+
"execution_count": null,
316+
"outputs": []
317+
},
318+
{
319+
"cell_type": "code",
320+
"source": [
321+
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
322+
"\n",
323+
"user_input = \"When was Marcus Aurelius the emperor of Rome?\"\n",
324+
"\n",
325+
"llm_chain.run(user_input)"
326+
],
327+
"metadata": {
328+
"id": "U0wSCoNvFNI9",
329+
"colab": {
330+
"base_uri": "https://localhost:8080/",
331+
"height": 87
332+
},
333+
"outputId": "78b85cb6-231c-4821-ac5d-41563ab0413e"
334+
},
335+
"execution_count": null,
336+
"outputs": [
337+
{
338+
"output_type": "execute_result",
339+
"data": {
340+
"text/plain": [
341+
"'Marcus Aurelius was the emperor of Rome from 161 to 180 AD. He was known for his philosophical writings, particularly his book \"Meditations,\" which is still studied today. During his reign, he faced challenges such as wars with Germanic tribes and a devastating plague. Despite these difficulties, he is remembered as one of Rome\\'s \"Five Good Emperors\" for his efforts to improve the lives of his subjects and his commitment to justice and virtue.'"
342+
],
343+
"application/vnd.google.colaboratory.intrinsic+json": {
344+
"type": "string"
345+
}
346+
},
347+
"metadata": {},
348+
"execution_count": 11
349+
}
350+
]
351+
},
352+
{
353+
"cell_type": "code",
354+
"source": [
355+
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
356+
"\n",
357+
"user_input = \"Who was Marcus Aurelius married to?\"\n",
358+
"\n",
359+
"llm_chain.run(user_input)"
360+
],
361+
"metadata": {
362+
"colab": {
363+
"base_uri": "https://localhost:8080/",
364+
"height": 87
365+
},
366+
"id": "WOlKi88W2SsJ",
367+
"outputId": "e2de79e5-b366-4899-82a0-d2c7780e5dde"
368+
},
369+
"execution_count": null,
370+
"outputs": [
371+
{
372+
"output_type": "execute_result",
373+
"data": {
374+
"text/plain": [
375+
"'Marcus Aurelius was married to a woman named Faustina the Younger. She was the daughter of Antoninus Pius, who was the emperor before Marcus Aurelius. Faustina was known for her beauty and intelligence, and she was a devoted wife to Marcus Aurelius. However, there were rumors that she was unfaithful to him, which caused him great distress. Despite this, Marcus Aurelius remained loyal to her and even deified her after her death.'"
376+
],
377+
"application/vnd.google.colaboratory.intrinsic+json": {
378+
"type": "string"
379+
}
380+
},
381+
"metadata": {},
382+
"execution_count": 12
383+
}
384+
]
385+
},
386+
{
387+
"cell_type": "code",
388+
"source": [],
389+
"metadata": {
390+
"id": "reuPLunX3sEm"
391+
},
392+
"execution_count": null,
393+
"outputs": []
394+
}
395+
]
396+
}

0 commit comments

Comments (0)