feat: 0.7 on COURSEMO
This commit is contained in:
parent ded1032825
commit 2d95b112c5
@@ -309,120 +309,67 @@
|
||||
"\n",
|
||||
"#### Model Template\n",
|
||||
"\n",
|
||||
"Note that you should copy and paste the code below *directly* into Coursemology for submission. You should probably test the code in this notebook on your local machine before uploading to Coursemology and using up an attempt. "
|
||||
"Note that you should copy and paste the code below *directly* into Coursemology for submission. You should probably test the code in this notebook on your local machine before uploading to Coursemology and using up an attempt.\n",
|
||||
" "
|
||||
]
|
||||
},
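A minimal local smoke test before spending a Coursemology attempt — a sketch only, assuming the Model / fit / predict interface defined in the cell below and that the grader supplies a list of videos of shape (num_frames, 16, 16) with integer labels 0–5. X_fake, y_fake and all sizes here are made up for illustration.

import numpy as np

# Hypothetical smoke test: run AFTER executing the model cell below, before uploading.
rng = np.random.default_rng(0)
X_fake = [rng.uniform(0, 255, size=(8, 16, 16)) for _ in range(60)]  # 60 fake videos, 8 frames of 16x16
y_fake = list(rng.integers(0, 6, size=60))                           # fake labels in 0..5

model = Model()                      # Model comes from the cell below
model.fit(X_fake, y_fake)            # should print the per-epoch losses
preds = model.predict(X_fake)
print(len(preds), np.unique(np.asarray(preds)))  # 60 predictions, labels drawn from 0..5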
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 65,
|
||||
"execution_count": 72,
|
||||
"id": "a44b7aa4",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-28T07:13:16.866724Z",
|
||||
"start_time": "2024-04-28T07:13:16.841471Z"
|
||||
"end_time": "2024-04-28T12:00:17.228662Z",
|
||||
"start_time": "2024-04-28T12:00:17.209494Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from torch import nn\n",
|
||||
"class tinyCNN(nn.Module):\n",
|
||||
" def __init__(self, num_classes):\n",
|
||||
" super(tinyCNN, self).__init__()\n",
|
||||
"import numpy as np\n",
|
||||
"import torch\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
" self.conv1 = nn.Conv2d(1,32,3,stride=1,padding=0)\n",
|
||||
" self.conv2 = nn.Conv2d(32,32,3,stride=1,padding=0)\n",
|
||||
" self.conv3 = nn.Conv2d(32,64,3,stride=1,padding=0)\n",
|
||||
" self.conv4 = nn.Conv2d(64,64,3,stride=1,padding=0)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"class CNN3D(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(CNN3D, self).__init__()\n",
|
||||
" self.conv1 = nn.Conv3d(1, 32, kernel_size=3, stride=1, padding=1)\n",
|
||||
" self.conv2 = nn.Conv3d(32, 64, kernel_size=3, stride=1, padding=1)\n",
|
||||
" self.batchnorm = nn.BatchNorm3d(32)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
" self.maxpool = nn.MaxPool2d(2)\n",
|
||||
" self.batchnorm1 = nn.BatchNorm2d(32)\n",
|
||||
" self.batchnorm2 = nn.BatchNorm2d(64)\n",
|
||||
" self.fc = nn.Linear(64, num_classes)\n",
|
||||
" self.flatten = nn.Flatten()\n",
|
||||
" self.maxpool = nn.MaxPool3d(kernel_size=2, stride=2)\n",
|
||||
" self.fc1 = nn.Linear(1024, 256) # Calculate input size based on output from conv3\n",
|
||||
" self.fc2 = nn.Linear(256, 6)\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv1(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" \n",
|
||||
" x = self.batchnorm1(x)\n",
|
||||
" x = self.maxpool(x)\n",
|
||||
" \n",
|
||||
" x = self.batchnorm(x)\n",
|
||||
" x = self.conv2(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
"\n",
|
||||
" x = self.conv3(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
"\n",
|
||||
" x = self.batchnorm2(x)\n",
|
||||
" x = self.maxpool(x)\n",
|
||||
" x = self.flatten(x)\n",
|
||||
" x = self.fc(x)\n",
|
||||
" return x\n",
|
||||
" \n",
|
||||
"class CIFARCNN(nn.Module):\n",
|
||||
" def __init__(self, classes):\n",
|
||||
" super().__init__()\n",
|
||||
" \"\"\"\n",
|
||||
" classes: integer that corresponds to the number of classes for CIFAR-10\n",
|
||||
" \"\"\"\n",
|
||||
" self.flatten = nn.Flatten()\n",
|
||||
" self.conv = nn.Sequential(\n",
|
||||
" nn.Conv2d(1, 32, 3),\n",
|
||||
" nn.MaxPool2d(2),\n",
|
||||
" nn.LeakyReLU(0.1),\n",
|
||||
" nn.Conv2d(32, 64, (3, 3)),\n",
|
||||
" nn.MaxPool2d(2),\n",
|
||||
" nn.LeakyReLU(0.1),\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" self.fc = nn.Sequential(\n",
|
||||
" nn.Linear(256, 256),\n",
|
||||
" nn.LeakyReLU(0.1),\n",
|
||||
" nn.Linear(256, 128),\n",
|
||||
" nn.LeakyReLU(0.1),\n",
|
||||
" nn.Linear(128, classes)\n",
|
||||
" )\n",
|
||||
" \n",
|
||||
" def forward(self, x):\n",
|
||||
" # YOUR CODE HERE\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.flatten(x)\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = x.view(x.size(0), -1) # Flatten features for fully connected layers\n",
|
||||
" x = self.fc1(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" x = self.fc2(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"# video is a numpy array of shape (L, H, W)\n",
|
||||
"def clean_batch(batch):\n",
|
||||
" batch = np.array(batch)\n",
|
||||
" temp_x = batch.reshape(-1, 256)\n",
|
||||
" np.nan_to_num(temp_x, copy=False)\n",
|
||||
" col_mean = np.nanmean(temp_x, axis=0)\n",
|
||||
" inds = np.where(np.isnan(temp_x))\n",
|
||||
" temp_x[inds] = np.take(col_mean, inds[1])\n",
|
||||
" temp_x = np.clip(temp_x, 1, 255)\n",
|
||||
" batch = temp_x.reshape(-1, 1, 16,16)\n",
|
||||
" return torch.tensor(batch, dtype=torch.float32)\n",
|
||||
"def flatten_data(X, y):\n",
|
||||
" not_nan_indices = np.argwhere(~np.isnan(np.array(y))).squeeze()\n",
|
||||
" # Remove non y columns\n",
|
||||
" y = [y[i] for i in not_nan_indices]\n",
|
||||
" X = [X[i] for i in not_nan_indices]\n",
|
||||
" flattened_x = [video[i] for video in X for i in range(video.shape[0])]\n",
|
||||
" flattened_y = np.repeat(y, [video.shape[0] for video in X])\n",
|
||||
" flattened_x = clean_batch(flattened_x)\n",
|
||||
" return flattened_x, torch.Tensor(np.array(flattened_y, dtype=np.int64)).long()\n",
|
||||
"\n",
|
||||
"def train(model, criterion, optimizer, loader, epochs = 10):\n",
|
||||
"def train(model, criterion, optimizer, loader, epochs=5):\n",
|
||||
" for epoch in range(epochs):\n",
|
||||
" for idx, (inputs, labels) in enumerate(loader):\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
" outputs = model(inputs)\n",
|
||||
" # print(outputs)\n",
|
||||
" loss = criterion(outputs, labels)\n",
|
||||
" loss.backward()\n",
|
||||
" optimizer.step()\n",
|
||||
" print(f'Epoch {epoch}, Loss: {loss.item()}')\n",
|
||||
" return model\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def process_data(X, y):\n",
|
||||
" y = np.array(y)\n",
|
||||
" X = np.array([video[:6] for video in X])\n",
|
||||
@@ -432,45 +379,51 @@
|
||||
" # Replace NaNs in each frame, with the average of the frame. This was generated with GPT\n",
|
||||
" for i in range(tensor_videos.shape[0]):\n",
|
||||
" for j in range(tensor_videos.shape[1]):\n",
|
||||
" tensor_videos[i][j][torch.isnan(tensor_videos[i][j])] = torch.mean(tensor_videos[i][j][~torch.isnan(tensor_videos[i][j])])\n",
|
||||
" \n",
|
||||
" tensor_videos[i][j][torch.isnan(tensor_videos[i][j])] = torch.mean(\n",
|
||||
" tensor_videos[i][j][~torch.isnan(tensor_videos[i][j])])\n",
|
||||
" # Undersample the data for each of the 6 classes. Select max of 300 samples for each class\n",
|
||||
" # Very much generated with the assitance of chatGPT with some modifications\n",
|
||||
" # Get the indices of each class\n",
|
||||
" indices = [np.argwhere(y == i).squeeze(1) for i in range(6)]\n",
|
||||
" # Get the number of samples to take for each class\n",
|
||||
" num_samples_to_take = 300\n",
|
||||
" num_samples_to_take = 600\n",
|
||||
" # Get the indices of the samples to take\n",
|
||||
" indices_to_take = [np.random.choice(indices[i], num_samples_to_take, replace=True) for i in range(6)]\n",
|
||||
" # Concatenate the indices\n",
|
||||
" indices_to_take = np.concatenate(indices_to_take)\n",
|
||||
" # Select the samples\n",
|
||||
" tensor_videos = tensor_videos[indices_to_take]\n",
|
||||
" tensor_videos = tensor_videos[indices_to_take].unsqueeze(1)\n",
|
||||
" y = y[indices_to_take]\n",
|
||||
" return torch.Tensor(tensor_videos), torch.Tensor(y)\n",
|
||||
" return torch.Tensor(tensor_videos), torch.Tensor(y).long()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class Model():\n",
|
||||
" def __init__(self):\n",
|
||||
" self.cnn = CIFARCNN(6)\n",
|
||||
" self.model = CNN3D()\n",
|
||||
" self.criterion = nn.CrossEntropyLoss()\n",
|
||||
" self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.001)\n",
|
||||
"\n",
|
||||
" def fit(self, X, y):\n",
|
||||
" self.cnn.train()\n",
|
||||
" X, y = process_data(X, y)\n",
|
||||
" print(X.shape, y.shape)\n",
|
||||
" train_dataset = torch.utils.data.TensorDataset(X, y)\n",
|
||||
" train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)\n",
|
||||
" criterion = nn.CrossEntropyLoss()\n",
|
||||
" optimizer = torch.optim.Adam(self.cnn.parameters(), lr=0.001)\n",
|
||||
" self.model = train(self.cnn, criterion, optimizer, train_loader)\n",
|
||||
" return self\n",
|
||||
" train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)\n",
|
||||
" train(self.model, self.criterion, self.optimizer, train_loader, 10)\n",
|
||||
"\n",
|
||||
" def predict(self, X):\n",
|
||||
" self.cnn.eval()\n",
|
||||
" results = []\n",
|
||||
" for idx, batch in enumerate(X):\n",
|
||||
" batch = clean_batch(batch)\n",
|
||||
" pred = self.cnn(batch)\n",
|
||||
" result = torch.argmax(pred, axis=1)\n",
|
||||
" results.append(torch.max(result))\n",
|
||||
" return results\n"
|
||||
" self.model.eval()\n",
|
||||
" with torch.no_grad():\n",
|
||||
" X = np.array([video[:6] for video in X])\n",
|
||||
" tensor_videos = torch.tensor(X, dtype=torch.float32)\n",
|
||||
" # Clip values to 0 and 255\n",
|
||||
" tensor_videos = np.clip(tensor_videos, 0, 255)\n",
|
||||
" # Replace NaNs in each frame, with the average of the frame. This was generated with GPT\n",
|
||||
" for i in range(tensor_videos.shape[0]):\n",
|
||||
" for j in range(tensor_videos.shape[1]):\n",
|
||||
" tensor_videos[i][j][torch.isnan(tensor_videos[i][j])] = torch.mean(\n",
|
||||
" tensor_videos[i][j][~torch.isnan(tensor_videos[i][j])])\n",
|
||||
" X = torch.Tensor(tensor_videos.unsqueeze(1))\n",
|
||||
" result = self.model(X)\n",
|
||||
" return torch.max(result, dim=1)[1].numpy()\n"
|
||||
]
|
||||
},
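A quick shape sanity check for the new CNN3D — a sketch, assuming inputs of shape (batch, 1, 6, 16, 16) as produced by process_data (first 6 frames of each 16x16 video with a channel dimension added); under that assumption the 1024 in nn.Linear(1024, 256) corresponds to 64 * 1 * 4 * 4 features after the two 3D poolings.

import torch

net = CNN3D()                          # defined in the cell above
dummy = torch.randn(4, 1, 6, 16, 16)   # 4 fake clips: 1 channel, 6 frames, 16x16 pixels
logits = net(dummy)
print(logits.shape)                    # expected: torch.Size([4, 6]), one logit per class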
|
||||
{
|
||||
@@ -485,12 +438,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"execution_count": 73,
|
||||
"id": "4f4dd489",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-28T07:10:17.037378Z",
|
||||
"start_time": "2024-04-28T07:10:17.031404Z"
|
||||
"end_time": "2024-04-28T12:00:19.363096Z",
|
||||
"start_time": "2024-04-28T12:00:19.352424Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
@@ -505,12 +458,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"execution_count": 74,
|
||||
"id": "3064e0ff",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-28T07:10:18.316631Z",
|
||||
"start_time": "2024-04-28T07:10:18.289375Z"
|
||||
"end_time": "2024-04-28T12:00:20.265060Z",
|
||||
"start_time": "2024-04-28T12:00:20.234748Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
@@ -524,12 +477,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 67,
|
||||
"execution_count": 75,
|
||||
"id": "27c9fd10",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-28T07:14:48.901477Z",
|
||||
"start_time": "2024-04-28T07:14:48.343775Z"
|
||||
"end_time": "2024-04-28T12:00:37.185569Z",
|
||||
"start_time": "2024-04-28T12:00:22.239036Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
@@ -537,30 +490,19 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"torch.Size([1800, 6, 16, 16]) torch.Size([1800])\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "RuntimeError",
|
||||
"evalue": "Given groups=1, weight of size [32, 1, 3, 3], expected input[32, 6, 16, 16] to have 1 channels, but got 6 channels instead",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
|
||||
"\u001B[0;31mRuntimeError\u001B[0m Traceback (most recent call last)",
|
||||
"File \u001B[0;32m<timed exec>:12\u001B[0m\n",
|
||||
"Cell \u001B[0;32mIn[65], line 137\u001B[0m, in \u001B[0;36mModel.fit\u001B[0;34m(self, X, y)\u001B[0m\n\u001B[1;32m 135\u001B[0m criterion \u001B[38;5;241m=\u001B[39m nn\u001B[38;5;241m.\u001B[39mCrossEntropyLoss()\n\u001B[1;32m 136\u001B[0m optimizer \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39moptim\u001B[38;5;241m.\u001B[39mAdam(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mcnn\u001B[38;5;241m.\u001B[39mparameters(), lr\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m0.001\u001B[39m)\n\u001B[0;32m--> 137\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mmodel \u001B[38;5;241m=\u001B[39m \u001B[43mtrain\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mcnn\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcriterion\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43moptimizer\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mtrain_loader\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 138\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\n",
|
||||
"Cell \u001B[0;32mIn[65], line 94\u001B[0m, in \u001B[0;36mtrain\u001B[0;34m(model, criterion, optimizer, loader, epochs)\u001B[0m\n\u001B[1;32m 92\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m idx, (inputs, labels) \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28menumerate\u001B[39m(loader):\n\u001B[1;32m 93\u001B[0m optimizer\u001B[38;5;241m.\u001B[39mzero_grad()\n\u001B[0;32m---> 94\u001B[0m outputs \u001B[38;5;241m=\u001B[39m \u001B[43mmodel\u001B[49m\u001B[43m(\u001B[49m\u001B[43minputs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 95\u001B[0m loss \u001B[38;5;241m=\u001B[39m criterion(outputs, labels)\n\u001B[1;32m 96\u001B[0m loss\u001B[38;5;241m.\u001B[39mbackward()\n",
|
||||
"File \u001B[0;32m/nix/store/4mv9lb8b1vjx88y2i7px1r2s8p3xlr7d-python3-3.11.9-env/lib/python3.11/site-packages/torch/nn/modules/module.py:1511\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m 1509\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_compiled_call_impl(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs) \u001B[38;5;66;03m# type: ignore[misc]\u001B[39;00m\n\u001B[1;32m 1510\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m-> 1511\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
|
||||
"File \u001B[0;32m/nix/store/4mv9lb8b1vjx88y2i7px1r2s8p3xlr7d-python3-3.11.9-env/lib/python3.11/site-packages/torch/nn/modules/module.py:1520\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m 1515\u001B[0m \u001B[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001B[39;00m\n\u001B[1;32m 1516\u001B[0m \u001B[38;5;66;03m# this function, and just call forward.\u001B[39;00m\n\u001B[1;32m 1517\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m (\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_pre_hooks\n\u001B[1;32m 1518\u001B[0m \u001B[38;5;129;01mor\u001B[39;00m _global_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_backward_hooks\n\u001B[1;32m 1519\u001B[0m \u001B[38;5;129;01mor\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_forward_pre_hooks):\n\u001B[0;32m-> 1520\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1522\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 1523\u001B[0m result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n",
|
||||
"Cell \u001B[0;32mIn[65], line 64\u001B[0m, in \u001B[0;36mCIFARCNN.forward\u001B[0;34m(self, x)\u001B[0m\n\u001B[1;32m 62\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mforward\u001B[39m(\u001B[38;5;28mself\u001B[39m, x):\n\u001B[1;32m 63\u001B[0m \u001B[38;5;66;03m# YOUR CODE HERE\u001B[39;00m\n\u001B[0;32m---> 64\u001B[0m x \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconv\u001B[49m\u001B[43m(\u001B[49m\u001B[43mx\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 65\u001B[0m x \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mflatten(x)\n\u001B[1;32m 66\u001B[0m x \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfc(x)\n",
|
||||
"File \u001B[0;32m/nix/store/4mv9lb8b1vjx88y2i7px1r2s8p3xlr7d-python3-3.11.9-env/lib/python3.11/site-packages/torch/nn/modules/module.py:1511\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m 1509\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_compiled_call_impl(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs) \u001B[38;5;66;03m# type: ignore[misc]\u001B[39;00m\n\u001B[1;32m 1510\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m-> 1511\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
|
||||
"File \u001B[0;32m/nix/store/4mv9lb8b1vjx88y2i7px1r2s8p3xlr7d-python3-3.11.9-env/lib/python3.11/site-packages/torch/nn/modules/module.py:1520\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m 1515\u001B[0m \u001B[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001B[39;00m\n\u001B[1;32m 1516\u001B[0m \u001B[38;5;66;03m# this function, and just call forward.\u001B[39;00m\n\u001B[1;32m 1517\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m (\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_pre_hooks\n\u001B[1;32m 1518\u001B[0m \u001B[38;5;129;01mor\u001B[39;00m _global_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_backward_hooks\n\u001B[1;32m 1519\u001B[0m \u001B[38;5;129;01mor\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_forward_pre_hooks):\n\u001B[0;32m-> 1520\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1522\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 1523\u001B[0m result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n",
|
||||
"File \u001B[0;32m/nix/store/4mv9lb8b1vjx88y2i7px1r2s8p3xlr7d-python3-3.11.9-env/lib/python3.11/site-packages/torch/nn/modules/container.py:217\u001B[0m, in \u001B[0;36mSequential.forward\u001B[0;34m(self, input)\u001B[0m\n\u001B[1;32m 215\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mforward\u001B[39m(\u001B[38;5;28mself\u001B[39m, \u001B[38;5;28minput\u001B[39m):\n\u001B[1;32m 216\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m module \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m:\n\u001B[0;32m--> 217\u001B[0m \u001B[38;5;28minput\u001B[39m \u001B[38;5;241m=\u001B[39m \u001B[43mmodule\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43minput\u001B[39;49m\u001B[43m)\u001B[49m\n\u001B[1;32m 218\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28minput\u001B[39m\n",
|
||||
"File \u001B[0;32m/nix/store/4mv9lb8b1vjx88y2i7px1r2s8p3xlr7d-python3-3.11.9-env/lib/python3.11/site-packages/torch/nn/modules/module.py:1511\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m 1509\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_compiled_call_impl(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs) \u001B[38;5;66;03m# type: ignore[misc]\u001B[39;00m\n\u001B[1;32m 1510\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m-> 1511\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
|
||||
"File \u001B[0;32m/nix/store/4mv9lb8b1vjx88y2i7px1r2s8p3xlr7d-python3-3.11.9-env/lib/python3.11/site-packages/torch/nn/modules/module.py:1520\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m 1515\u001B[0m \u001B[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001B[39;00m\n\u001B[1;32m 1516\u001B[0m \u001B[38;5;66;03m# this function, and just call forward.\u001B[39;00m\n\u001B[1;32m 1517\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m (\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_pre_hooks\n\u001B[1;32m 1518\u001B[0m \u001B[38;5;129;01mor\u001B[39;00m _global_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_backward_hooks\n\u001B[1;32m 1519\u001B[0m \u001B[38;5;129;01mor\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_forward_pre_hooks):\n\u001B[0;32m-> 1520\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1522\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 1523\u001B[0m result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n",
|
||||
"File \u001B[0;32m/nix/store/4mv9lb8b1vjx88y2i7px1r2s8p3xlr7d-python3-3.11.9-env/lib/python3.11/site-packages/torch/nn/modules/conv.py:460\u001B[0m, in \u001B[0;36mConv2d.forward\u001B[0;34m(self, input)\u001B[0m\n\u001B[1;32m 459\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mforward\u001B[39m(\u001B[38;5;28mself\u001B[39m, \u001B[38;5;28minput\u001B[39m: Tensor) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Tensor:\n\u001B[0;32m--> 460\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_conv_forward\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43minput\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mweight\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mbias\u001B[49m\u001B[43m)\u001B[49m\n",
|
||||
"File \u001B[0;32m/nix/store/4mv9lb8b1vjx88y2i7px1r2s8p3xlr7d-python3-3.11.9-env/lib/python3.11/site-packages/torch/nn/modules/conv.py:456\u001B[0m, in \u001B[0;36mConv2d._conv_forward\u001B[0;34m(self, input, weight, bias)\u001B[0m\n\u001B[1;32m 452\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mpadding_mode \u001B[38;5;241m!=\u001B[39m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mzeros\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[1;32m 453\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m F\u001B[38;5;241m.\u001B[39mconv2d(F\u001B[38;5;241m.\u001B[39mpad(\u001B[38;5;28minput\u001B[39m, \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_reversed_padding_repeated_twice, mode\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mpadding_mode),\n\u001B[1;32m 454\u001B[0m weight, bias, \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mstride,\n\u001B[1;32m 455\u001B[0m _pair(\u001B[38;5;241m0\u001B[39m), \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mdilation, \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mgroups)\n\u001B[0;32m--> 456\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mF\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconv2d\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43minput\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mweight\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbias\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mstride\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 457\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mpadding\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mdilation\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mgroups\u001B[49m\u001B[43m)\u001B[49m\n",
|
||||
"\u001B[0;31mRuntimeError\u001B[0m: Given groups=1, weight of size [32, 1, 3, 3], expected input[32, 6, 16, 16] to have 1 channels, but got 6 channels instead"
|
||||
"Epoch 0, Loss: 0.7495917081832886\n",
|
||||
"Epoch 1, Loss: 0.42713749408721924\n",
|
||||
"Epoch 2, Loss: 0.21424821019172668\n",
|
||||
"Epoch 3, Loss: 0.02086367830634117\n",
|
||||
"Epoch 4, Loss: 0.005386564414948225\n",
|
||||
"Epoch 5, Loss: 0.00319607718847692\n",
|
||||
"Epoch 6, Loss: 0.007663913071155548\n",
|
||||
"Epoch 7, Loss: 0.003004509722813964\n",
|
||||
"Epoch 8, Loss: 0.0044013322331011295\n",
|
||||
"Epoch 9, Loss: 0.0016760551370680332\n",
|
||||
"F1 Score (macro): 0.75\n",
|
||||
"CPU times: user 57.8 s, sys: 1min 12s, total: 2min 10s\n",
|
||||
"Wall time: 14.9 s\n"
|
||||
]
|
||||
}
|
||||
],
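The evaluation cell that prints the macro F1 above is not part of this diff; a plausible sketch of it, assuming scikit-learn is available and that X_val / y_val are a held-out split in the same format as the training data.

from sklearn.metrics import f1_score

preds = model.predict(X_val)           # model, X_val, y_val are assumed to exist in the notebook
print(f"F1 Score (macro): {f1_score(y_val, preds, average='macro'):.2f}")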
|
||||
|
@@ -506,12 +506,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 230,
|
||||
"execution_count": 238,
|
||||
"id": "d8dffd7d",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-28T07:57:09.790124Z",
|
||||
"start_time": "2024-04-28T07:57:09.780591Z"
|
||||
"end_time": "2024-04-28T08:00:54.037178Z",
|
||||
"start_time": "2024-04-28T08:00:54.027410Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
@@ -524,7 +524,6 @@
|
||||
" self.mp = nn.AvgPool3d(2)\n",
|
||||
" self.relu = nn.LeakyReLU()\n",
|
||||
" self.fc1 = nn.Linear(3888, 6)\n",
|
||||
" self.fc2 = nn.Linear(128, 6)\n",
|
||||
" self.flatten = nn.Flatten()\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv1(x)\n",
|
||||
@@ -534,8 +533,6 @@
|
||||
" # print(x.shape)\n",
|
||||
" \n",
|
||||
" x = x.view(-1, 3888)\n",
|
||||
" x = self.fc1(x)\n",
|
||||
" # x = self.fc2(x)\n",
|
||||
" return x\n",
|
||||
" \n",
|
||||
"def train(model, criterion, optimizer, loader, epochs = 10):\n",
|
||||
@@ -548,8 +545,7 @@
|
||||
" optimizer.step()\n",
|
||||
" print(f'Epoch {epoch}, Loss: {loss.item()}')\n",
|
||||
" return model\n",
|
||||
"def process_data(X, y):\n",
|
||||
" y = np.array(y)\n",
|
||||
"def process_X(X):\n",
|
||||
" X = np.array([video[:6] for video in X])\n",
|
||||
" tensor_videos = torch.tensor(X, dtype=torch.float32)\n",
|
||||
" # Clip values to 0 and 255\n",
|
||||
@@ -558,6 +554,11 @@
|
||||
" for i in range(tensor_videos.shape[0]):\n",
|
||||
" for j in range(tensor_videos.shape[1]):\n",
|
||||
" tensor_videos[i][j][torch.isnan(tensor_videos[i][j])] = torch.mean(tensor_videos[i][j][~torch.isnan(tensor_videos[i][j])])\n",
|
||||
" return tensor_videos\n",
|
||||
" \n",
|
||||
"def process_data(X, y):\n",
|
||||
" y = np.array(y)\n",
|
||||
" tensor_videos = process_X(X)\n",
|
||||
" # Undersample the data for each of the 6 classes. Select max of 300 samples for each class\n",
|
||||
" # Very much generated with the assitance of chatGPT with some modifications\n",
|
||||
" # Get the indices of each class\n",
|
||||
@@ -607,12 +608,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 217,
|
||||
"execution_count": 239,
|
||||
"id": "9245ab47",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-28T07:55:53.563103Z",
|
||||
"start_time": "2024-04-28T07:55:53.544134Z"
|
||||
"end_time": "2024-04-28T08:00:56.273946Z",
|
||||
"start_time": "2024-04-28T08:00:56.253771Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
@@ -640,17 +641,17 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Epoch 0, Loss: 4.225716590881348\n",
|
||||
"Epoch 1, Loss: 0.9198675155639648\n",
|
||||
"Epoch 2, Loss: 1.7365752458572388\n",
|
||||
"Epoch 3, Loss: 0.4570190906524658\n",
|
||||
"Epoch 4, Loss: 0.11014104634523392\n",
|
||||
"Epoch 5, Loss: 0.24420055747032166\n",
|
||||
"Epoch 6, Loss: 0.03079795092344284\n",
|
||||
"Epoch 7, Loss: 0.07790327817201614\n",
|
||||
"Epoch 8, Loss: 0.07603466510772705\n",
|
||||
"Epoch 9, Loss: 0.04154537618160248\n",
|
||||
"F1 Score (macro): 0.51\n"
|
||||
"Epoch 0, Loss: 85.83575439453125\n",
|
||||
"Epoch 1, Loss: 43.13077926635742\n",
|
||||
"Epoch 2, Loss: 13.879751205444336\n",
|
||||
"Epoch 3, Loss: 3.084989070892334\n",
|
||||
"Epoch 4, Loss: 5.557327747344971\n",
|
||||
"Epoch 5, Loss: 3.1260528564453125\n",
|
||||
"Epoch 6, Loss: 3.4430527687072754\n",
|
||||
"Epoch 7, Loss: 5.166628837585449\n",
|
||||
"Epoch 8, Loss: 4.4976654052734375\n",
|
||||
"Epoch 9, Loss: 5.530020236968994\n",
|
||||
"F1 Score (macro): 0.02\n"
|
||||
]
|
||||
}
|
||||
],
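A macro F1 of 0.02 is far below the roughly 0.17 expected from uniform random guessing over six balanced classes, so a quick look at the predicted label distribution is worthwhile. A sketch only: preds is assumed to be the prediction array produced by the evaluation cell above.

import numpy as np

values, counts = np.unique(np.asarray(preds), return_counts=True)
print(dict(zip(values.tolist(), counts.tolist())))  # predictions collapsing onto one or two classes would show up here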
|
||||
@@ -666,12 +667,12 @@
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-28T07:57:38.644155Z",
|
||||
"start_time": "2024-04-28T07:57:35.958882Z"
|
||||
"end_time": "2024-04-28T08:01:04.071319Z",
|
||||
"start_time": "2024-04-28T08:01:01.436939Z"
|
||||
}
|
||||
},
|
||||
"id": "abb2d957f4a15bd2",
|
||||
"execution_count": 235
|
||||
"execution_count": 241
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
|