added a simple transformer model
Browse files- GPT_from_Scratch.ipynb +603 -0
GPT_from_Scratch.ipynb
ADDED
|
@@ -0,0 +1,603 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"nbformat": 4,
|
| 3 |
+
"nbformat_minor": 0,
|
| 4 |
+
"metadata": {
|
| 5 |
+
"colab": {
|
| 6 |
+
"provenance": [],
|
| 7 |
+
"machine_shape": "hm",
|
| 8 |
+
"gpuType": "V100",
|
| 9 |
+
"authorship_tag": "ABX9TyPXG8YN53SD54EwJ7vikFke",
|
| 10 |
+
"include_colab_link": true
|
| 11 |
+
},
|
| 12 |
+
"kernelspec": {
|
| 13 |
+
"name": "python3",
|
| 14 |
+
"display_name": "Python 3"
|
| 15 |
+
},
|
| 16 |
+
"language_info": {
|
| 17 |
+
"name": "python"
|
| 18 |
+
},
|
| 19 |
+
"accelerator": "GPU"
|
| 20 |
+
},
|
| 21 |
+
"cells": [
|
| 22 |
+
{
|
| 23 |
+
"cell_type": "markdown",
|
| 24 |
+
"metadata": {
|
| 25 |
+
"id": "view-in-github",
|
| 26 |
+
"colab_type": "text"
|
| 27 |
+
},
|
| 28 |
+
"source": [
|
| 29 |
+
"<a href=\"https://colab.research.google.com/github/shivendrra/SmallLanguageModel-project/blob/main/Demo%20Models/Colab%20Notebooks/GPT_from_Scratch.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
| 30 |
+
]
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"cell_type": "code",
|
| 34 |
+
"source": [
|
| 35 |
+
# Mount Google Drive so the training corpus stored there is readable
# from this Colab runtime.
from google.colab import drive

drive.mount('/content/drive')
|
| 37 |
+
],
|
| 38 |
+
"metadata": {
|
| 39 |
+
"id": "S4Vqi5Ii3hF_",
|
| 40 |
+
"colab": {
|
| 41 |
+
"base_uri": "https://localhost:8080/"
|
| 42 |
+
},
|
| 43 |
+
"outputId": "3b0bbe85-3c69-42ab-8071-2c1464515ec5"
|
| 44 |
+
},
|
| 45 |
+
"execution_count": 2,
|
| 46 |
+
"outputs": [
|
| 47 |
+
{
|
| 48 |
+
"output_type": "stream",
|
| 49 |
+
"name": "stdout",
|
| 50 |
+
"text": [
|
| 51 |
+
"Mounted at /content/drive\n"
|
| 52 |
+
]
|
| 53 |
+
}
|
| 54 |
+
]
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"cell_type": "code",
|
| 58 |
+
"source": [
|
| 59 |
+
# Load the raw training corpus from Drive into one string.
file_path = '/content/drive/MyDrive/big_data_v2.txt'
with open(file_path, 'r', encoding='utf-8') as file:
    data = file.read()

# len(data) counts characters (this is a character-level model), not words —
# the original message mislabeled this figure as "billion words".
total_no_of_words = len(data)
print(f"{total_no_of_words/1e9} billion characters")
|
| 65 |
+
],
|
| 66 |
+
"metadata": {
|
| 67 |
+
"id": "QbLkBa5S3pwl",
|
| 68 |
+
"colab": {
|
| 69 |
+
"base_uri": "https://localhost:8080/"
|
| 70 |
+
},
|
| 71 |
+
"outputId": "ba920417-cc65-49cc-f60f-8b7843bce6b3"
|
| 72 |
+
},
|
| 73 |
+
"execution_count": 3,
|
| 74 |
+
"outputs": [
|
| 75 |
+
{
|
| 76 |
+
"output_type": "stream",
|
| 77 |
+
"name": "stdout",
|
| 78 |
+
"text": [
|
| 79 |
+
"2.27416219 billion words\n"
|
| 80 |
+
]
|
| 81 |
+
}
|
| 82 |
+
]
|
| 83 |
+
},
|
| 84 |
+
{
|
| 85 |
+
"cell_type": "code",
|
| 86 |
+
"source": [
|
| 87 |
+
# Character-level vocabulary: every distinct character of the corpus, sorted.
chars = sorted(set(data))
vocab_size = len(chars)
print('vocab size:', vocab_size)
|
| 92 |
+
],
|
| 93 |
+
"metadata": {
|
| 94 |
+
"id": "aWU788nx3rhB",
|
| 95 |
+
"colab": {
|
| 96 |
+
"base_uri": "https://localhost:8080/"
|
| 97 |
+
},
|
| 98 |
+
"outputId": "adc94b5b-f6fa-4525-9498-45a325358a6b"
|
| 99 |
+
},
|
| 100 |
+
"execution_count": 4,
|
| 101 |
+
"outputs": [
|
| 102 |
+
{
|
| 103 |
+
"output_type": "stream",
|
| 104 |
+
"name": "stdout",
|
| 105 |
+
"text": [
|
| 106 |
+
"vocab size: 107363\n"
|
| 107 |
+
]
|
| 108 |
+
}
|
| 109 |
+
]
|
| 110 |
+
},
|
| 111 |
+
{
|
| 112 |
+
"cell_type": "code",
|
| 113 |
+
"source": [
|
| 114 |
+
# Hold out the final 10% of the corpus for validation; train on the first 90%.
split_at = int(0.9 * len(data))
train_data, val_data = data[:split_at], data[split_at:]
|
| 119 |
+
],
|
| 120 |
+
"metadata": {
|
| 121 |
+
"id": "P7G65oTV3tma"
|
| 122 |
+
},
|
| 123 |
+
"execution_count": 5,
|
| 124 |
+
"outputs": []
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"cell_type": "code",
|
| 128 |
+
"source": [
|
| 129 |
+
print("train data {}, val data {}".format(len(train_data), len(val_data)))
|
| 130 |
+
],
|
| 131 |
+
"metadata": {
|
| 132 |
+
"id": "M0flKW6njg5m",
|
| 133 |
+
"colab": {
|
| 134 |
+
"base_uri": "https://localhost:8080/"
|
| 135 |
+
},
|
| 136 |
+
"outputId": "37a19243-78c6-4aeb-f28a-f591d2cd1a46"
|
| 137 |
+
},
|
| 138 |
+
"execution_count": 6,
|
| 139 |
+
"outputs": [
|
| 140 |
+
{
|
| 141 |
+
"output_type": "stream",
|
| 142 |
+
"name": "stdout",
|
| 143 |
+
"text": [
|
| 144 |
+
"train data 2046745971, val data 227416219\n"
|
| 145 |
+
]
|
| 146 |
+
}
|
| 147 |
+
]
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"cell_type": "code",
|
| 151 |
+
"source": [
|
| 152 |
+
# Record a wall-clock reference point for timing the rest of the run.
import timeit

start_time = timeit.default_timer()
|
| 154 |
+
],
|
| 155 |
+
"metadata": {
|
| 156 |
+
"id": "6vOM85YE3vse"
|
| 157 |
+
},
|
| 158 |
+
"execution_count": 7,
|
| 159 |
+
"outputs": []
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"cell_type": "code",
|
| 163 |
+
"source": [
|
| 164 |
+
# Character-level codec: map each character to its index in `chars` and back.
string_to_index = {ch: i for i, ch in enumerate(chars)}
index_to_string = dict(enumerate(chars))


def encode(text):
    """Turn a string into a list of vocabulary indices."""
    return [string_to_index[ch] for ch in text]


def decode(indices):
    """Turn a list of vocabulary indices back into a string."""
    return ''.join(index_to_string[i] for i in indices)


print(encode('hello there'))
print(decode(encode('hello there')))
|
| 173 |
+
],
|
| 174 |
+
"metadata": {
|
| 175 |
+
"id": "1jOagLL43ymD",
|
| 176 |
+
"colab": {
|
| 177 |
+
"base_uri": "https://localhost:8080/"
|
| 178 |
+
},
|
| 179 |
+
"outputId": "14e8c75b-ba85-4c7a-9a6b-f9f94b706418"
|
| 180 |
+
},
|
| 181 |
+
"execution_count": 8,
|
| 182 |
+
"outputs": [
|
| 183 |
+
{
|
| 184 |
+
"output_type": "stream",
|
| 185 |
+
"name": "stdout",
|
| 186 |
+
"text": [
|
| 187 |
+
"[96, 93, 100, 100, 103, 24, 108, 96, 93, 106, 93]\n",
|
| 188 |
+
"hello there\n"
|
| 189 |
+
]
|
| 190 |
+
}
|
| 191 |
+
]
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"cell_type": "code",
|
| 195 |
+
"source": [
|
| 196 |
+
import torch

# Encode both splits into int64 index tensors (what nn.Embedding expects).
train_data, val_data = (
    torch.tensor(encode(split), dtype=torch.long)
    for split in (train_data, val_data)
)
|
| 201 |
+
],
|
| 202 |
+
"metadata": {
|
| 203 |
+
"id": "ZXimMdyR32wN"
|
| 204 |
+
},
|
| 205 |
+
"execution_count": 9,
|
| 206 |
+
"outputs": []
|
| 207 |
+
},
|
| 208 |
+
{
|
| 209 |
+
"cell_type": "code",
|
| 210 |
+
"source": [
|
| 211 |
+
# Report how many tokens each split holds.
for split in (train_data, val_data):
    print(len(split) / 1e6, 'million')
|
| 213 |
+
],
|
| 214 |
+
"metadata": {
|
| 215 |
+
"id": "JKv8mswZIyYR",
|
| 216 |
+
"colab": {
|
| 217 |
+
"base_uri": "https://localhost:8080/"
|
| 218 |
+
},
|
| 219 |
+
"outputId": "6a265df8-b67e-4342-f0a3-ad28cbf325c7"
|
| 220 |
+
},
|
| 221 |
+
"execution_count": 10,
|
| 222 |
+
"outputs": [
|
| 223 |
+
{
|
| 224 |
+
"output_type": "stream",
|
| 225 |
+
"name": "stdout",
|
| 226 |
+
"text": [
|
| 227 |
+
"2046.745971 million\n",
|
| 228 |
+
"227.416219 million\n"
|
| 229 |
+
]
|
| 230 |
+
}
|
| 231 |
+
]
|
| 232 |
+
},
|
| 233 |
+
{
|
| 234 |
+
"cell_type": "code",
|
| 235 |
+
"execution_count": 11,
|
| 236 |
+
"metadata": {
|
| 237 |
+
"id": "hSbAd0fCl_nx",
|
| 238 |
+
"colab": {
|
| 239 |
+
"base_uri": "https://localhost:8080/",
|
| 240 |
+
"height": 501
|
| 241 |
+
},
|
| 242 |
+
"outputId": "72ffab29-bc94-431a-c75c-6f64c3167fbb"
|
| 243 |
+
},
|
| 244 |
+
"outputs": [
|
| 245 |
+
{
|
| 246 |
+
"output_type": "stream",
|
| 247 |
+
"name": "stdout",
|
| 248 |
+
"text": [
|
| 249 |
+
"148.185955 million\n"
|
| 250 |
+
]
|
| 251 |
+
},
|
| 252 |
+
{
|
| 253 |
+
"output_type": "error",
|
| 254 |
+
"ename": "OutOfMemoryError",
|
| 255 |
+
"evalue": "CUDA out of memory. Tried to allocate 26.21 GiB. GPU 0 has a total capacty of 15.77 GiB of which 13.89 GiB is free. Process 2283 has 1.88 GiB memory in use. Of the allocated memory 1.38 GiB is allocated by PyTorch, and 144.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
|
| 256 |
+
"traceback": [
|
| 257 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
| 258 |
+
"\u001b[0;31mOutOfMemoryError\u001b[0m Traceback (most recent call last)",
|
| 259 |
+
"\u001b[0;32m<ipython-input-11-9173ad31bf7a>\u001b[0m in \u001b[0;36m<cell line: 189>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 191\u001b[0m \u001b[0;31m# every once in a while evaluate the loss on train and val sets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 192\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0miter\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0meval_interval\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0miter\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mmax_iters\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 193\u001b[0;31m \u001b[0mlosses\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mestimate_loss\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 194\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"step {iter}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 195\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
|
| 260 |
+
"\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py\u001b[0m in \u001b[0;36mdecorate_context\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 113\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdecorate_context\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 114\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mctx_factory\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 115\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 116\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdecorate_context\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
| 261 |
+
"\u001b[0;32m<ipython-input-11-9173ad31bf7a>\u001b[0m in \u001b[0;36mestimate_loss\u001b[0;34m()\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0meval_iters\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 40\u001b[0;31m \u001b[0mlogits\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 41\u001b[0m \u001b[0mlosses\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlosses\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
| 262 |
+
"\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1516\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1517\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1518\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1519\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1520\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
| 263 |
+
"\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1525\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1526\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1527\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1528\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1529\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
| 264 |
+
"\u001b[0;32m<ipython-input-11-9173ad31bf7a>\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, idx, targets)\u001b[0m\n\u001b[1;32m 146\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mblocks\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# (B,T,C)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 147\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mln_f\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# (B,T,C)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 148\u001b[0;31m \u001b[0mlogits\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlm_head\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# (B,T,vocab_size)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 149\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 150\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mtargets\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
| 265 |
+
"\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1516\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1517\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1518\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1519\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1520\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
| 266 |
+
"\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1525\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1526\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1527\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1528\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1529\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
| 267 |
+
"\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/linear.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 113\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mTensor\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 114\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlinear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 115\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mextra_repr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
| 268 |
+
"\u001b[0;31mOutOfMemoryError\u001b[0m: CUDA out of memory. Tried to allocate 26.21 GiB. GPU 0 has a total capacty of 15.77 GiB of which 13.89 GiB is free. Process 2283 has 1.88 GiB memory in use. Of the allocated memory 1.38 GiB is allocated by PyTorch, and 144.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF"
|
| 269 |
+
]
|
| 270 |
+
}
|
| 271 |
+
],
|
| 272 |
+
"source": [
|
| 273 |
+
"import torch\n",
|
| 274 |
+
"import torch.nn as nn\n",
|
| 275 |
+
"from torch.nn import functional as F\n",
|
| 276 |
+
"\n",
|
| 277 |
+
# ---- hyperparameters ----
batch_size = 64        # independent sequences processed in parallel
block_size = 1024      # maximum context length for predictions
max_iters = 10000      # total optimization steps
eval_interval = 1000   # how often to run the held-out loss estimate
learning_rate = 3e-4
device = 'cuda' if torch.cuda.is_available() else 'cpu'
eval_iters = 200       # batches averaged per loss estimate
n_embd = 512           # NOTE(review): 512 is not divisible by n_head=12, so each
                       # head gets 512 // 12 = 42 dims and the concat is 504 wide
                       # before the output projection maps it back to 512 — confirm
                       # this is intentional.
n_head = 12
n_layer = 12
dropout = 0.2
norm_eps = 1e-05
# -------------------------

torch.manual_seed(1400)
|
| 293 |
+
"\n",
|
| 294 |
+
# data loading ------------------------------------------------------------
def get_batch(split):
    """Sample a random batch of (context, next-token target) pairs.

    split: 'train' or 'val' — selects which encoded tensor to draw from.
    Returns x, y, each (batch_size, block_size), already moved to `device`;
    y is x shifted left by one position.
    """
    source = train_data if split == 'train' else val_data
    starts = torch.randint(len(source) - block_size, (batch_size,))
    x = torch.stack([source[s:s + block_size] for s in starts])
    y = torch.stack([source[s + 1:s + block_size + 1] for s in starts])
    return x.to(device), y.to(device)
|
| 303 |
+
"\n",
|
| 304 |
+
@torch.no_grad()
def estimate_loss():
    """Average the loss over `eval_iters` fresh batches for each split.

    Switches the model to eval mode (disabling dropout) for the measurement
    and restores train mode before returning {'train': ..., 'val': ...}.
    """
    results = {}
    model.eval()
    for split in ('train', 'val'):
        batch_losses = torch.zeros(eval_iters)
        for i in range(eval_iters):
            xb, yb = get_batch(split)
            _, loss = model(xb, yb)
            batch_losses[i] = loss.item()
        results[split] = batch_losses.mean()
    model.train()
    return results
|
| 317 |
+
"\n",
|
| 318 |
+
class Head(nn.Module):
    """A single head of causal (masked) self-attention."""

    def __init__(self, head_size):
        super().__init__()
        # Project the model width down to this head's width, no biases.
        self.key = nn.Linear(n_embd, head_size, bias=False)
        self.query = nn.Linear(n_embd, head_size, bias=False)
        self.value = nn.Linear(n_embd, head_size, bias=False)
        # Lower-triangular mask buffer: position t may only attend to <= t.
        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Map (batch, time, n_embd) -> (batch, time, head_size)."""
        B, T, C = x.shape
        k = self.key(x)    # (B, T, hs)
        q = self.query(x)  # (B, T, hs)
        # Scaled dot-product scores, with the future masked out.
        scores = q @ k.transpose(-2, -1) * k.shape[-1] ** -0.5  # (B, T, T)
        scores = scores.masked_fill(self.tril[:T, :T] == 0, float('-inf'))
        weights = self.dropout(F.softmax(scores, dim=-1))  # (B, T, T)
        # Weighted aggregation of the values.
        return weights @ self.value(x)  # (B, T, hs)
|
| 345 |
+
"\n",
|
| 346 |
+
class MultiHeadAttention(nn.Module):
    """Several attention heads in parallel, mixed by an output projection."""

    def __init__(self, num_heads, head_size):
        super().__init__()
        self.heads = nn.ModuleList(Head(head_size) for _ in range(num_heads))
        self.proj = nn.Linear(head_size * num_heads, n_embd)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Concatenate every head's output and project back to n_embd width."""
        concatenated = torch.cat([head(x) for head in self.heads], dim=-1)
        return self.dropout(self.proj(concatenated))
|
| 359 |
+
"\n",
|
| 360 |
+
class FeedFoward(nn.Module):
    """Position-wise MLP: expand 4x, ReLU, project back, then dropout.

    (The class name keeps the original spelling — `Block` refers to it.)
    """

    def __init__(self, n_embd):
        super().__init__()
        hidden = 4 * n_embd
        self.net = nn.Sequential(
            nn.Linear(n_embd, hidden),
            nn.ReLU(),
            nn.Linear(hidden, n_embd),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)
|
| 374 |
+
class Block(nn.Module):
    """Pre-norm transformer block: attention then MLP, each with a residual."""

    def __init__(self, n_embd, n_head):
        # n_embd: embedding width; n_head: number of attention heads.
        super().__init__()
        self.sa = MultiHeadAttention(n_head, n_embd // n_head)
        self.ffwd = FeedFoward(n_embd)
        self.ln1 = nn.LayerNorm(n_embd, eps=norm_eps)
        self.ln2 = nn.LayerNorm(n_embd, eps=norm_eps)

    def forward(self, x):
        x = x + self.sa(self.ln1(x))
        return x + self.ffwd(self.ln2(x))
|
| 390 |
+
"\n",
|
| 391 |
+
class GPTLanguageModel(nn.Module):
    """Decoder-only character-level transformer language model.

    Token + learned position embeddings feed a stack of `n_layer` pre-norm
    blocks, followed by a final LayerNorm and a linear head over the vocabulary.
    """

    def __init__(self):
        super().__init__()
        # Each token reads its embedding from a lookup table; positions get
        # their own learned table of size block_size.
        self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
        self.position_embedding_table = nn.Embedding(block_size, n_embd)
        self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])
        self.ln_f = nn.LayerNorm(n_embd, eps=norm_eps)  # final layer norm
        self.lm_head = nn.Linear(n_embd, vocab_size)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """GPT-style init: N(0, 0.02) weights, zero biases."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                # Operate on the parameter itself, not `.data` (which bypasses
                # autograd bookkeeping); init functions are already no-grad.
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        """Compute next-token logits, and the loss when targets are given.

        idx: (B, T) token indices; targets: optional (B, T) next-token indices.
        Returns (logits, loss) where loss is None if no targets were supplied.
        """
        B, T = idx.shape

        tok_emb = self.token_embedding_table(idx)  # (B, T, C)
        # Use the input tensor's own device instead of the global `device`,
        # so the model also works on CPU copies of itself.
        pos_emb = self.position_embedding_table(torch.arange(T, device=idx.device))  # (T, C)
        x = self.blocks(tok_emb + pos_emb)  # (B, T, C)
        x = self.ln_f(x)                    # (B, T, C)
        logits = self.lm_head(x)            # (B, T, vocab_size)

        if targets is None:
            return logits, None

        # cross_entropy wants (N, C) logits against (N,) class indices.
        B, T, C = logits.shape
        loss = F.cross_entropy(logits.view(B * T, C), targets.view(B * T))
        return logits, loss

    def generate(self, idx, max_new_tokens):
        """Autoregressively append `max_new_tokens` sampled tokens to idx (B, T)."""
        for _ in range(max_new_tokens):
            # Crop the context to the last block_size tokens.
            idx_cond = idx[:, -block_size:]
            logits, _ = self(idx_cond)
            # Sample from the distribution over the final time step only.
            probs = F.softmax(logits[:, -1, :], dim=-1)  # (B, C)
            idx_next = torch.multinomial(probs, num_samples=1)  # (B, 1)
            idx = torch.cat((idx, idx_next), dim=1)  # (B, T+1)
        return idx
|
| 448 |
+
"\n",
|
| 449 |
+
"model = GPTLanguageModel()\n",
|
| 450 |
+
"m = model.to(device)\n",
|
| 451 |
+
"# print the number of parameters in the model\n",
|
| 452 |
+
"n_param = sum(p.numel() for p in m.parameters())/1e6\n",
|
| 453 |
+
"print(n_param, 'million')\n",
|
| 454 |
+
"\n",
|
| 455 |
+
"# create a PyTorch optimizer\n",
|
| 456 |
+
"optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)\n",
|
| 457 |
+
"steps = []\n",
|
| 458 |
+
"train_losses = []\n",
|
| 459 |
+
"val_losses = []\n",
|
| 460 |
+
"\n",
|
| 461 |
+
"for iter in range(max_iters):\n",
|
| 462 |
+
"\n",
|
| 463 |
+
" # every once in a while evaluate the loss on train and val sets\n",
|
| 464 |
+
" if iter % eval_interval == 0 or iter == max_iters - 1:\n",
|
| 465 |
+
" losses = estimate_loss()\n",
|
| 466 |
+
" print(f\"step {iter}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}\")\n",
|
| 467 |
+
"\n",
|
| 468 |
+
" # Store step and loss values for visualization\n",
|
| 469 |
+
" steps.append(iter)\n",
|
| 470 |
+
" train_losses.append(losses['train'])\n",
|
| 471 |
+
" val_losses.append(losses['val'])\n",
|
| 472 |
+
"\n",
|
| 473 |
+
" # sample a batch of data\n",
|
| 474 |
+
" xb, yb = get_batch('train')\n",
|
| 475 |
+
"\n",
|
| 476 |
+
" # evaluate the loss\n",
|
| 477 |
+
" logits, loss = model(xb, yb)\n",
|
| 478 |
+
" optimizer.zero_grad(set_to_none=True)\n",
|
| 479 |
+
" loss.backward()\n",
|
| 480 |
+
" optimizer.step()"
|
| 481 |
+
]
|
| 482 |
+
},
|
| 483 |
+
{
|
| 484 |
+
"cell_type": "code",
|
| 485 |
+
"source": [
|
| 486 |
+
"run_time = timeit.default_timer()\n",
|
| 487 |
+
"total_time = run_time - start_time"
|
| 488 |
+
],
|
| 489 |
+
"metadata": {
|
| 490 |
+
"id": "BdXl_pFb2RqL"
|
| 491 |
+
},
|
| 492 |
+
"execution_count": null,
|
| 493 |
+
"outputs": []
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"cell_type": "code",
|
| 497 |
+
"source": [
|
| 498 |
+
"input_from_user = \"people often lie about themselves but\"\n",
|
| 499 |
+
"token_input = encode(input_from_user)"
|
| 500 |
+
],
|
| 501 |
+
"metadata": {
|
| 502 |
+
"id": "79TNjWKC3Ut8"
|
| 503 |
+
},
|
| 504 |
+
"execution_count": null,
|
| 505 |
+
"outputs": []
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"cell_type": "code",
|
| 509 |
+
"source": [
|
| 510 |
+
"# generate from the model\n",
|
| 511 |
+
"context = torch.tensor([token_input], dtype=torch.long, device=device)\n",
|
| 512 |
+
"generated_output = decode(m.generate(context, max_new_tokens=100)[0].tolist())"
|
| 513 |
+
],
|
| 514 |
+
"metadata": {
|
| 515 |
+
"id": "XSLsqXg03T3j"
|
| 516 |
+
},
|
| 517 |
+
"execution_count": null,
|
| 518 |
+
"outputs": []
|
| 519 |
+
},
|
| 520 |
+
{
|
| 521 |
+
"cell_type": "code",
|
| 522 |
+
"source": [
|
| 523 |
+
"import matplotlib.pyplot as plt\n",
|
| 524 |
+
"\n",
|
| 525 |
+
"plt.figure(figsize=(10, 6))\n",
|
| 526 |
+
"plt.plot(steps, train_losses, label='Train Loss')\n",
|
| 527 |
+
"plt.plot(steps, val_losses, label='Validation Loss')\n",
|
| 528 |
+
"plt.title('Loss Over Steps')\n",
|
| 529 |
+
"plt.xlabel('Steps')\n",
|
| 530 |
+
"plt.ylabel('Loss')\n",
|
| 531 |
+
"plt.legend()\n",
|
| 532 |
+
"\n",
|
| 533 |
+
"plt.show()"
|
| 534 |
+
],
|
| 535 |
+
"metadata": {
|
| 536 |
+
"id": "vmjyuccN42pV"
|
| 537 |
+
},
|
| 538 |
+
"execution_count": null,
|
| 539 |
+
"outputs": []
|
| 540 |
+
},
|
| 541 |
+
{
|
| 542 |
+
"cell_type": "code",
|
| 543 |
+
"source": [
|
| 544 |
+
"# saving the model\n",
|
| 545 |
+
"torch.save(model.state_dict(), 'transformer_model.pth')"
|
| 546 |
+
],
|
| 547 |
+
"metadata": {
|
| 548 |
+
"id": "5bwKjvO_aBZ-"
|
| 549 |
+
},
|
| 550 |
+
"execution_count": null,
|
| 551 |
+
"outputs": []
|
| 552 |
+
},
|
| 553 |
+
{
|
| 554 |
+
"cell_type": "code",
|
| 555 |
+
"source": [
|
| 556 |
+
"ffn_factor = 6\n",
|
| 557 |
+
"embedding_params = n_embd * vocab_size\n",
|
| 558 |
+
"attention_params = n_head * (n_embd // n_head * 2 * n_embd) * n_layer\n",
|
| 559 |
+
"\n",
|
| 560 |
+
"feedforward_params = n_embd * ffn_factor * n_layer * 2\n",
|
| 561 |
+
"total_params = embedding_params + attention_params + feedforward_params"
|
| 562 |
+
],
|
| 563 |
+
"metadata": {
|
| 564 |
+
"id": "60DleGHAgFcy"
|
| 565 |
+
},
|
| 566 |
+
"execution_count": null,
|
| 567 |
+
"outputs": []
|
| 568 |
+
},
|
| 569 |
+
{
|
| 570 |
+
"cell_type": "code",
|
| 571 |
+
"source": [
|
| 572 |
+
"# summary\n",
|
| 573 |
+
"print('//// Summary ////')\n",
|
| 574 |
+
"print(f\"total no of words in the file: {total_no_of_words / 1e6} million\")\n",
|
| 575 |
+
"print(f\"total predicted parameters: {total_params / 1e6} million\")\n",
|
| 576 |
+
"print(f\"actual no of parameters: {n_param} million\")\n",
|
| 577 |
+
"print(f\"total time taken to run the model was {total_time / 3600} hrs\")\n",
|
| 578 |
+
"print(f\"model ran for {max_iters} iterations and final val loss: {val_losses[-1]} and train loss: {train_losses[-1]}\")\n",
|
| 579 |
+
"print('\\n')\n",
|
| 580 |
+
"print(\"/// output ///\")\n",
|
| 581 |
+
"print(f\"I gave input text as: '{input_from_user}'\")\n",
|
| 582 |
+
"print(f\"generated output was {generated_output}\")"
|
| 583 |
+
],
|
| 584 |
+
"metadata": {
|
| 585 |
+
"id": "GWsCV-S72a1G"
|
| 586 |
+
},
|
| 587 |
+
"execution_count": null,
|
| 588 |
+
"outputs": []
|
| 589 |
+
},
|
| 590 |
+
{
|
| 591 |
+
"cell_type": "code",
|
| 592 |
+
"source": [
|
| 593 |
+
"from google.colab import files\n",
|
| 594 |
+
"files.download('transformer_model.pth')"
|
| 595 |
+
],
|
| 596 |
+
"metadata": {
|
| 597 |
+
"id": "VfkCY1IPuege"
|
| 598 |
+
},
|
| 599 |
+
"execution_count": null,
|
| 600 |
+
"outputs": []
|
| 601 |
+
}
|
| 602 |
+
]
|
| 603 |
+
}
|