[X_dropped preview: head and tail rows over columns V0, V1, V3, V4, V5, V6, V8, V9, V12, V13, ..., V56-V59 and 8, 10, 17, 27, 31, 59]
357699 rows × 46 columns
\n
"
+ },
+ "execution_count": 122,
+ "metadata": {},
+ "output_type": "execute_result"
}
],
"source": [
- "print(flattened_images.shape)"
+ "X_dropped = X.drop(columns_to_drop, axis=1)\n",
+ "X_dropped"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
- "end_time": "2024-04-16T09:10:48.089387Z",
- "start_time": "2024-04-16T09:10:48.083321Z"
+ "end_time": "2024-04-27T07:50:42.584344Z",
+ "start_time": "2024-04-27T07:50:42.498150Z"
}
},
- "id": "d996a04b28b2d1be",
- "execution_count": 101
+ "id": "b8383cb1d724181c",
+ "execution_count": 122
+ },
+ {
+ "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "(357699, 46)\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(X_dropped.shape)"
+ ],
+ "metadata": {
+ "collapsed": false,
+ "ExecuteTime": {
+ "end_time": "2024-04-27T07:54:05.509713Z",
+ "start_time": "2024-04-27T07:54:05.505067Z"
+ }
+ },
+ "id": "c64798f73ec3412f",
+ "execution_count": 134
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### 2. Detection and Handling of Missing Values"
+ ],
+ "metadata": {},
+ "id": "adb61967"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 135,
+ "id": "4bb9cdfb",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-04-27T07:54:06.587195Z",
+ "start_time": "2024-04-27T07:54:06.478662Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# For the columns with nan's that are not the object columns, fill them with mean\n",
+ "# For the object columns, fill them with the mode\n",
+ "X_missing = X_dropped.fillna(X_dropped.mean())\n",
+ "# TODO: Replace with mode for object columns"
+ ]
},
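+ {
+ "cell_type": "markdown",
+ "source": [
+ "A minimal sketch of the mode fill mentioned in the TODO above, assuming `X_dropped` may still contain object-dtype columns; `numeric_cols`, `object_cols`, and `X_sketch` are illustrative names."
+ ],
+ "metadata": {},
+ "id": "mode-fill-sketch-md"
+ },
+ {
+ "cell_type": "code",
+ "outputs": [],
+ "source": [
+ "# Sketch: mean-fill numeric columns, mode-fill object columns (assumes X_dropped from above)\n",
+ "numeric_cols = X_dropped.select_dtypes(include='number').columns\n",
+ "object_cols = X_dropped.select_dtypes(include='object').columns\n",
+ "X_sketch = X_dropped.copy()\n",
+ "X_sketch[numeric_cols] = X_sketch[numeric_cols].fillna(X_sketch[numeric_cols].mean())\n",
+ "for col in object_cols:\n",
+ "    # mode() can return several values; take the first\n",
+ "    X_sketch[col] = X_sketch[col].fillna(X_sketch[col].mode().iloc[0])"
+ ],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "mode-fill-sketch-code",
+ "execution_count": null
+ },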
{
"cell_type": "markdown",
@@ -369,16 +399,57 @@
},
{
"cell_type": "code",
- "execution_count": 100,
- "id": "ed1c17a1",
+ "outputs": [],
+ "source": [
+ "# Time to do PCA\n",
+ "from sklearn.decomposition import PCA\n",
+ "pca = PCA(n_components=30)\n",
+ "X_pca = pca.fit_transform(X_missing)\n",
+ "# plt.scatter(X_pca[:, 0], X_pca[:, 1], c=Y)\n",
+ "# plt.colorbar()\n",
+ "# plt.show()"
+ ],
"metadata": {
+ "collapsed": false,
"ExecuteTime": {
- "end_time": "2024-04-16T09:10:46.281705Z",
- "start_time": "2024-04-16T09:10:46.278864Z"
+ "end_time": "2024-04-27T07:59:38.839920Z",
+ "start_time": "2024-04-27T07:59:34.454737Z"
}
},
- "outputs": [],
- "source": []
+ "id": "878c95195942e270",
+ "execution_count": 151
+ },
+ {
+ "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "0.9999008890228839\n",
+ "2\n"
+ ]
+ }
+ ],
+ "source": [
+ "res = 0\n",
+ "variance = pca.explained_variance_ratio_\n",
+ "for i in range(len(variance)):\n",
+ " if np.sum(variance[0:i]) >= 0.99:\n",
+ " res = i\n",
+ " break\n",
+ "print(np.sum(variance[:res]))\n",
+ "print(res)\n"
+ ],
+ "metadata": {
+ "collapsed": false,
+ "ExecuteTime": {
+ "end_time": "2024-04-27T07:59:42.918071Z",
+ "start_time": "2024-04-27T07:59:42.915297Z"
+ }
+ },
+ "id": "724586267e51a3c5",
+ "execution_count": 155
},
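+ {
+ "cell_type": "markdown",
+ "source": [
+ "The same component count can be read off a cumulative sum; a small sketch of that idiom, with `n_components_99` as an illustrative name and the 0.99 threshold matching the loop above."
+ ],
+ "metadata": {},
+ "id": "cumsum-variance-sketch-md"
+ },
+ {
+ "cell_type": "code",
+ "outputs": [],
+ "source": [
+ "# Sketch: cumulative explained variance and the first index that crosses 0.99\n",
+ "cumulative = np.cumsum(pca.explained_variance_ratio_)\n",
+ "n_components_99 = int(np.argmax(cumulative >= 0.99) + 1)\n",
+ "print(n_components_99, cumulative[n_components_99 - 1])"
+ ],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "cumsum-variance-sketch-code",
+ "execution_count": null
+ },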
{
"cell_type": "markdown",
@@ -502,11 +573,63 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 158,
"id": "d8dffd7d",
- "metadata": {},
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-04-27T08:00:10.769193Z",
+ "start_time": "2024-04-27T08:00:10.657136Z"
+ }
+ },
"outputs": [],
- "source": []
+ "source": [
+ "# Split the data into train and test\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "from sklearn.linear_model import LinearRegression\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X_missing, Y, test_size=0.2, random_state=42)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "MSE: 5311.417393315556\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Linear Regression\n",
+ "# # Train the model\n",
+ "model = LinearRegression()\n",
+ "model.fit(X_train, y_train)\n",
+ "# # Predict\n",
+ "y_pred = model.predict(X_test)\n",
+ "# # Evaluate\n",
+ "mse = mean_squared_error(y_test, y_pred)\n",
+ "print('MSE:', mse)\n"
+ ],
+ "metadata": {
+ "collapsed": false,
+ "ExecuteTime": {
+ "end_time": "2024-04-27T08:00:12.375826Z",
+ "start_time": "2024-04-27T08:00:11.942486Z"
+ }
+ },
+ "id": "9864de4426d22d9b",
+ "execution_count": 159
+ },
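+ {
+ "cell_type": "markdown",
+ "source": [
+ "A possible follow-up sketch: reuse the same split settings and linear model on the PCA-reduced features `X_pca` computed earlier; the `*_pca` variable names are illustrative."
+ ],
+ "metadata": {},
+ "id": "pca-regression-sketch-md"
+ },
+ {
+ "cell_type": "code",
+ "outputs": [],
+ "source": [
+ "# Sketch: same 80/20 split and LinearRegression, but on the PCA-reduced features\n",
+ "Xp_train, Xp_test, yp_train, yp_test = train_test_split(X_pca, Y, test_size=0.2, random_state=42)\n",
+ "model_pca = LinearRegression().fit(Xp_train, yp_train)\n",
+ "print('MSE (PCA features):', mean_squared_error(yp_test, model_pca.predict(Xp_test)))"
+ ],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "pca-regression-sketch-code",
+ "execution_count": null
+ },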
+ {
+ "cell_type": "code",
+ "outputs": [],
+ "source": [],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "5381f534af74f626"
},
{
"cell_type": "markdown",
diff --git a/cs2109s/labs/final/main.ipynb b/cs2109s/labs/final/main.ipynb
index 45b15d1..5bf0a76 100644
--- a/cs2109s/labs/final/main.ipynb
+++ b/cs2109s/labs/final/main.ipynb
@@ -314,94 +314,163 @@
},
{
"cell_type": "code",
- "execution_count": 224,
+ "execution_count": 65,
"id": "a44b7aa4",
"metadata": {
"ExecuteTime": {
- "end_time": "2024-04-27T15:21:54.599035Z",
- "start_time": "2024-04-27T15:21:54.587252Z"
+ "end_time": "2024-04-28T07:13:16.866724Z",
+ "start_time": "2024-04-28T07:13:16.841471Z"
}
},
"outputs": [],
"source": [
- "from torch.utils.data import TensorDataset, DataLoader\n",
"import torch\n",
"from torch import nn\n",
+ "class tinyCNN(nn.Module):\n",
+ " def __init__(self, num_classes):\n",
+ " super(tinyCNN, self).__init__()\n",
"\n",
- "class Model: \n",
- " \"\"\"\n",
- " This class represents an AI model.\n",
- " \"\"\"\n",
+ " self.conv1 = nn.Conv2d(1,32,3,stride=1,padding=0)\n",
+ " self.conv2 = nn.Conv2d(32,32,3,stride=1,padding=0)\n",
+ " self.conv3 = nn.Conv2d(32,64,3,stride=1,padding=0)\n",
+ " self.conv4 = nn.Conv2d(64,64,3,stride=1,padding=0)\n",
+ " \n",
+ " self.relu = nn.ReLU()\n",
+ " self.maxpool = nn.MaxPool2d(2)\n",
+ " self.batchnorm1 = nn.BatchNorm2d(32)\n",
+ " self.batchnorm2 = nn.BatchNorm2d(64)\n",
+ " self.fc = nn.Linear(64, num_classes)\n",
+ " self.flatten = nn.Flatten()\n",
+ "\n",
+ " def forward(self, x):\n",
+ " x = self.conv1(x)\n",
+ " x = self.relu(x)\n",
+ " \n",
+ " x = self.batchnorm1(x)\n",
+ " x = self.maxpool(x)\n",
+ " \n",
+ " x = self.conv2(x)\n",
+ " x = self.relu(x)\n",
+ "\n",
+ " x = self.conv3(x)\n",
+ " x = self.relu(x)\n",
+ "\n",
+ " x = self.batchnorm2(x)\n",
+ " x = self.maxpool(x)\n",
+ " x = self.flatten(x)\n",
+ " x = self.fc(x)\n",
+ " return x\n",
" \n",
- " def __init__(self):\n",
+ "class CIFARCNN(nn.Module):\n",
+ " def __init__(self, classes):\n",
+ " super().__init__()\n",
" \"\"\"\n",
- " Constructor for Model class.\n",
- " \n",
- " Parameters\n",
- " ----------\n",
- " self : object\n",
- " The instance of the object passed by Python.\n",
+ " classes: integer that corresponds to the number of classes for CIFAR-10\n",
" \"\"\"\n",
- " # TODO: Replace the following code with your own initialization code.\n",
- " self.model = nn.Sequential(\n",
- " nn.LSTM(256, 128, 2, batch_first=True),\n",
- " nn.Linear(128, 6),\n",
+ " self.flatten = nn.Flatten()\n",
+ " self.conv = nn.Sequential(\n",
+ " nn.Conv2d(1, 32, 3),\n",
+ " nn.MaxPool2d(2),\n",
+ " nn.LeakyReLU(0.1),\n",
+ " nn.Conv2d(32, 64, (3, 3)),\n",
+ " nn.MaxPool2d(2),\n",
+ " nn.LeakyReLU(0.1),\n",
+ " )\n",
+ "\n",
+ " self.fc = nn.Sequential(\n",
+ " nn.Linear(256, 256),\n",
+ " nn.LeakyReLU(0.1),\n",
+ " nn.Linear(256, 128),\n",
+ " nn.LeakyReLU(0.1),\n",
+ " nn.Linear(128, classes)\n",
" )\n",
" \n",
- " def process(self, X):\n",
- " X_array = np.zeros((10, 16, 16))\n",
- " for i, video in enumerate(X):\n",
- " X_array[i, :, :] = video\n",
- " X_array = X_array.reshape((10, 1, 256))\n",
- " print(X_array.shape)\n",
- " \n",
- " return torch.from_numpy(X_array).float()\n",
+ " def forward(self, x):\n",
+ " # YOUR CODE HERE\n",
+ " x = self.conv(x)\n",
+ " x = self.flatten(x)\n",
+ " x = self.fc(x)\n",
+ " return x\n",
"\n",
+ "# video is a numpy array of shape (L, H, W)\n",
+ "def clean_batch(batch):\n",
+ " batch = np.array(batch)\n",
+ " temp_x = batch.reshape(-1, 256)\n",
+ " np.nan_to_num(temp_x, copy=False)\n",
+ " col_mean = np.nanmean(temp_x, axis=0)\n",
+ " inds = np.where(np.isnan(temp_x))\n",
+ " temp_x[inds] = np.take(col_mean, inds[1])\n",
+ " temp_x = np.clip(temp_x, 1, 255)\n",
+ " batch = temp_x.reshape(-1, 1, 16,16)\n",
+ " return torch.tensor(batch, dtype=torch.float32)\n",
+ "def flatten_data(X, y):\n",
+ " not_nan_indices = np.argwhere(~np.isnan(np.array(y))).squeeze()\n",
+ " # Remove non y columns\n",
+ " y = [y[i] for i in not_nan_indices]\n",
+ " X = [X[i] for i in not_nan_indices]\n",
+ " flattened_x = [video[i] for video in X for i in range(video.shape[0])]\n",
+ " flattened_y = np.repeat(y, [video.shape[0] for video in X])\n",
+ " flattened_x = clean_batch(flattened_x)\n",
+ " return flattened_x, torch.Tensor(np.array(flattened_y, dtype=np.int64)).long()\n",
+ "\n",
+ "def train(model, criterion, optimizer, loader, epochs = 10):\n",
+ " for epoch in range(epochs):\n",
+ " for idx, (inputs, labels) in enumerate(loader):\n",
+ " optimizer.zero_grad()\n",
+ " outputs = model(inputs)\n",
+ " loss = criterion(outputs, labels)\n",
+ " loss.backward()\n",
+ " optimizer.step()\n",
+ " print(f'Epoch {epoch}, Loss: {loss.item()}')\n",
+ " return model\n",
+ "def process_data(X, y):\n",
+ " y = np.array(y)\n",
+ " X = np.array([video[:6] for video in X])\n",
+ " tensor_videos = torch.tensor(X, dtype=torch.float32)\n",
+ " # Clip values to 0 and 255\n",
+ " tensor_videos = np.clip(tensor_videos, 0, 255)\n",
+ " # Replace NaNs in each frame, with the average of the frame. This was generated with GPT\n",
+ " for i in range(tensor_videos.shape[0]):\n",
+ " for j in range(tensor_videos.shape[1]):\n",
+ " tensor_videos[i][j][torch.isnan(tensor_videos[i][j])] = torch.mean(tensor_videos[i][j][~torch.isnan(tensor_videos[i][j])])\n",
+ " \n",
+ " # Undersample the data for each of the 6 classes. Select max of 300 samples for each class\n",
+ " # Very much generated with the assitance of chatGPT with some modifications\n",
+ " # Get the indices of each class\n",
+ " indices = [np.argwhere(y == i).squeeze(1) for i in range(6)]\n",
+ " # Get the number of samples to take for each class\n",
+ " num_samples_to_take = 300\n",
+ " # Get the indices of the samples to take\n",
+ " indices_to_take = [np.random.choice(indices[i], num_samples_to_take, replace=True) for i in range(6)]\n",
+ " # Concatenate the indices\n",
+ " indices_to_take = np.concatenate(indices_to_take)\n",
+ " # Select the samples\n",
+ " tensor_videos = tensor_videos[indices_to_take]\n",
+ " y = y[indices_to_take]\n",
+ " return torch.Tensor(tensor_videos), torch.Tensor(y)\n",
+ "\n",
+ "class Model():\n",
+ " def __init__(self):\n",
+ " self.cnn = CIFARCNN(6)\n",
" def fit(self, X, y):\n",
- " \"\"\"\n",
- " Train the model using the input data.\n",
- " \n",
- " Parameters\n",
- " ----------\n",
- " X : list of size (n_samples)\n",
- " Each item in the list is a grayscale video of shape (L, H, W).\n",
- " L represents the length of the video, which may vary between videos. \n",
- " H and W represent the height and width, which are consistent across all videos. \n",
- " y : list of size (n_samples)\n",
- " Class labels for videos\n",
- " \n",
- " Returns\n",
- " -------\n",
- " self : object\n",
- " Returns an instance of the trained model.\n",
- " \"\"\"\n",
- " X = self.process(X)\n",
- " train_dataset = TensorDataset(X, torch.tensor(y))\n",
- " train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)\n",
- "\n",
+ " self.cnn.train()\n",
+ " X, y = process_data(X, y)\n",
+ " print(X.shape, y.shape)\n",
+ " train_dataset = torch.utils.data.TensorDataset(X, y)\n",
+ " train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)\n",
+ " criterion = nn.CrossEntropyLoss()\n",
+ " optimizer = torch.optim.Adam(self.cnn.parameters(), lr=0.001)\n",
+ " self.model = train(self.cnn, criterion, optimizer, train_loader)\n",
" return self\n",
- " \n",
" def predict(self, X):\n",
- " \"\"\"\n",
- " Use the trained model to make predictions.\n",
- " \n",
- " Parameters\n",
- " ----------\n",
- " X : list of size (n_samples)\n",
- " Each item in the list is a grayscale video of shape (L, H, W).\n",
- " L represents the length of the video, which may vary between videos. \n",
- " H and W represent the height and width, which are consistent across all videos. \n",
- " \n",
- " Returns\n",
- " -------\n",
- " ndarray of shape (n_samples,)\n",
- " Predicted target values per element in X.\n",
- " \n",
- " \"\"\"\n",
- " result = []\n",
- " for video in X:\n",
- " result.append(self.model(self.process(video)).argmax(dim=1).detach().numpy())\n",
- " return result"
+ " self.cnn.eval()\n",
+ " results = []\n",
+ " for idx, batch in enumerate(X):\n",
+ " batch = clean_batch(batch)\n",
+ " pred = self.cnn(batch)\n",
+ " result = torch.argmax(pred, axis=1)\n",
+ " results.append(torch.max(result))\n",
+ " return results\n"
]
},
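+ {
+ "cell_type": "markdown",
+ "source": [
+ "For reference, a small sketch of an alternative per-video aggregation: a majority vote over the per-frame class predictions instead of the max class index. `predict_video_majority`, `m`, and `video_pred` are illustrative names; the snippet assumes a trained `Model` instance and a single video array as handled in `predict` above."
+ ],
+ "metadata": {},
+ "id": "majority-vote-sketch-md"
+ },
+ {
+ "cell_type": "code",
+ "outputs": [],
+ "source": [
+ "# Sketch: majority vote over frame-level predictions for one video\n",
+ "def predict_video_majority(m, video):\n",
+ "    frames = clean_batch(video)  # (L, 1, 16, 16) float tensor\n",
+ "    frame_classes = torch.argmax(m.cnn(frames), dim=1)\n",
+ "    video_pred = torch.mode(frame_classes).values  # most frequent class across frames\n",
+ "    return video_pred"
+ ],
+ "metadata": {
+ "collapsed": false
+ },
+ "id": "majority-vote-sketch-code",
+ "execution_count": null
+ },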
{
@@ -416,12 +485,12 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 49,
"id": "4f4dd489",
"metadata": {
"ExecuteTime": {
- "end_time": "2024-04-27T13:36:15.463930Z",
- "start_time": "2024-04-27T13:36:15.298430Z"
+ "end_time": "2024-04-28T07:10:17.037378Z",
+ "start_time": "2024-04-28T07:10:17.031404Z"
}
},
"outputs": [],
@@ -436,12 +505,12 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 50,
"id": "3064e0ff",
"metadata": {
"ExecuteTime": {
- "end_time": "2024-04-27T13:36:18.535709Z",
- "start_time": "2024-04-27T13:36:18.509684Z"
+ "end_time": "2024-04-28T07:10:18.316631Z",
+ "start_time": "2024-04-28T07:10:18.289375Z"
}
},
"outputs": [],
@@ -455,12 +524,12 @@
},
{
"cell_type": "code",
- "execution_count": 225,
+ "execution_count": 67,
"id": "27c9fd10",
"metadata": {
"ExecuteTime": {
- "end_time": "2024-04-27T15:21:56.847320Z",
- "start_time": "2024-04-27T15:21:56.787331Z"
+ "end_time": "2024-04-28T07:14:48.901477Z",
+ "start_time": "2024-04-28T07:14:48.343775Z"
}
},
"outputs": [
@@ -468,31 +537,37 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "(10, 1, 256)\n"
+ "torch.Size([1800, 6, 16, 16]) torch.Size([1800])\n"
]
},
{
- "ename": "TypeError",
- "evalue": "linear(): argument 'input' (position 1) must be Tensor, not tuple",
+ "ename": "RuntimeError",
+ "evalue": "Given groups=1, weight of size [32, 1, 3, 3], expected input[32, 6, 16, 16] to have 1 channels, but got 6 channels instead",
"output_type": "error",
"traceback": [
"\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
- "\u001B[0;31mTypeError\u001B[0m Traceback (most recent call last)",
- "Cell \u001B[0;32mIn[225], line 13\u001B[0m\n\u001B[1;32m 11\u001B[0m model \u001B[38;5;241m=\u001B[39m Model()\n\u001B[1;32m 12\u001B[0m \u001B[38;5;66;03m# model.fit(X_train, y_train)\u001B[39;00m\n\u001B[0;32m---> 13\u001B[0m y_pred \u001B[38;5;241m=\u001B[39m \u001B[43mmodel\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mpredict\u001B[49m\u001B[43m(\u001B[49m\u001B[43mX_test\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 14\u001B[0m \u001B[38;5;28mprint\u001B[39m(y_pred[\u001B[38;5;241m0\u001B[39m])\n\u001B[1;32m 15\u001B[0m \u001B[38;5;28mprint\u001B[39m(y[\u001B[38;5;241m0\u001B[39m])\n",
- "Cell \u001B[0;32mIn[224], line 77\u001B[0m, in \u001B[0;36mModel.predict\u001B[0;34m(self, X)\u001B[0m\n\u001B[1;32m 75\u001B[0m result \u001B[38;5;241m=\u001B[39m []\n\u001B[1;32m 76\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m video \u001B[38;5;129;01min\u001B[39;00m X:\n\u001B[0;32m---> 77\u001B[0m result\u001B[38;5;241m.\u001B[39mappend(\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mmodel\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mprocess\u001B[49m\u001B[43m(\u001B[49m\u001B[43mvideo\u001B[49m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241m.\u001B[39margmax(dim\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1\u001B[39m)\u001B[38;5;241m.\u001B[39mdetach()\u001B[38;5;241m.\u001B[39mnumpy())\n\u001B[1;32m 78\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m result\n",
+ "\u001B[0;31mRuntimeError\u001B[0m Traceback (most recent call last)",
+ "File \u001B[0;32m