feat: 0.38 local
@@ -21,31 +21,44 @@ import os
 class CNN3D(nn.Module):
     def __init__(self):
         super(CNN3D, self).__init__()
-        self.conv1 = nn.Conv3d(1, 12, 2, 1, 2)
-        self.mp = nn.AvgPool3d(2)
-        self.relu = nn.LeakyReLU()
-        self.fc1 = nn.Linear(3888, 6)
-        self.fc2 = nn.Linear(128, 6)
-        self.flatten = nn.Flatten()
+        self.conv1 = nn.Conv3d(1, 16, 2, 1, 2)
+        self.batchnorm3d = nn.BatchNorm3d(16)
+        self.batchnorm1d = nn.BatchNorm1d(64)
+
+        self.dropout = nn.Dropout(0.5)
+        self.mp3d = nn.AvgPool3d(2)
+        self.relu = nn.ReLU()
 
+        self.lstm = nn.LSTM(5184, 64, 1, batch_first=True)
+        self.fc2 = nn.Linear(64, 6)
+
     def forward(self, x):
         x = self.conv1(x)
-        x = self.mp(x)
+        x = self.mp3d(x)
+        x = self.batchnorm3d(x)
         x = self.relu(x)
+        x = self.dropout(x)
 
+        x = x.view(-1, 5184)
+
         # print(x.shape)
 
-        x = x.view(-1, 3888)
-        x = self.fc1(x)
-        # x = self.fc2(x)
-        return x
+        x, _ = self.lstm(x)
+        # print(x.shape)
+        x = self.batchnorm1d(x)
+        x = self.relu(x)
+        x = self.dropout(x)
+
+        x = self.fc2(x)
+        return torch.softmax(x, dim=1)
 
 
-def train(model, criterion, optimizer, loader, epochs=10):
+def train(model, criterion, optimizer, loader, epochs=20):
     for epoch in range(epochs):
         for idx, (inputs, labels) in enumerate(loader):
             optimizer.zero_grad()
             outputs = model(inputs)
+            # print(outputs)
             loss = criterion(outputs, labels)
             loss.backward()
             optimizer.step()
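As a quick sanity check on the new architecture (not part of the commit), the shape arithmetic below walks a dummy batch through the same layer stack. The input size is an assumption: predict() in the next hunk keeps only the first 6 frames of each video, and 16x16 frames are one spatial size for which the hard-coded view(-1, 5184) works out, so treat the dummy shape as illustrative rather than the project's real data shape.

import torch
import torch.nn as nn

x = torch.randn(4, 1, 6, 16, 16)                   # assumed clip batch: (N, C, frames, H, W)
x = nn.Conv3d(1, 16, 2, 1, 2)(x)                   # kernel 2, padding 2 grows each dim by 3 -> (4, 16, 9, 19, 19)
x = nn.AvgPool3d(2)(x)                             # -> (4, 16, 4, 9, 9)
# BatchNorm3d, ReLU and Dropout keep the shape, so they are skipped here.
x = x.view(-1, 5184)                               # 16 * 4 * 9 * 9 = 5184 features per clip
x, _ = nn.LSTM(5184, 64, 1, batch_first=True)(x)   # 2-D input is read as one unbatched sequence (PyTorch >= 1.11) -> (4, 64)
x = nn.Linear(64, 6)(x)                            # class scores -> (4, 6)
print(torch.softmax(x, dim=1).shape)               # torch.Size([4, 6])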
@@ -90,11 +103,11 @@ class Model():
         X, y = process_data(X, y)
         train_dataset = torch.utils.data.TensorDataset(X, y)
         train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
-        train(self.model, self.criterion, self.optimizer, train_loader)
+        train(self.model, self.criterion, self.optimizer, train_loader, 10)
 
     def predict(self, X):
         self.model.eval()
-
+        with torch.no_grad():
         X = np.array([video[:6] for video in X])
         tensor_videos = torch.tensor(X, dtype=torch.float32)
         # Clip values to 0 and 255
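The functional change in this hunk is wrapping inference in torch.no_grad(). A minimal illustration of the eval()/no_grad() pattern with a stand-in module (not the project's model):

import torch

model = torch.nn.Linear(4, 2)   # stand-in for self.model
model.eval()                    # fixes BatchNorm/Dropout to inference behaviour
with torch.no_grad():           # no autograd graph is built for the forward pass
    out = model(torch.randn(3, 4))
print(out.requires_grad)        # False, so .numpy() works without .detach()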
@@ -105,7 +118,8 @@ class Model():
                 tensor_videos[i][j][torch.isnan(tensor_videos[i][j])] = torch.mean(
                     tensor_videos[i][j][~torch.isnan(tensor_videos[i][j])])
         X = torch.Tensor(tensor_videos.unsqueeze(1))
-        return np.argmax(self.model(X).detach().numpy(), axis=1)
+        result = self.model(X)
+        return torch.max(result, dim=1)[1].numpy()
 
 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
 
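For reference (not part of the commit), the new index extraction in predict() returns the same class indices as the old np.argmax call; torch.argmax(result, dim=1) would be an equivalent one-liner:

import numpy as np
import torch

scores = torch.tensor([[0.1, 0.7, 0.2],
                       [0.5, 0.3, 0.2]])
old_way = np.argmax(scores.detach().numpy(), axis=1)   # previous implementation -> array([1, 0])
new_way = torch.max(scores, dim=1)[1].numpy()          # this commit -> array([1, 0])
assert (old_way == new_way).all()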