Commit 272419d9 authored by TheRiPtide

chore: rebase

parent af5a124a
1 merge request: !23 feat: deep-learning poly(A) classifier
@@ -22,13 +22,20 @@
},
{
"cell_type": "code",
"execution_count": 68,
"outputs": [],
"source": [
"# importing the libraries\n",
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"# for creating validation set\n",
"from sklearn.model_selection import train_test_split\n",
@@ -40,7 +47,11 @@
"# PyTorch libraries and modules\n",
"import torch\n",
"from torch.autograd import Variable\n",
"from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, MaxPool1d, Module, Softmax, BatchNorm1d, Dropout, Conv1d\n",
"from torch.optim import Adam, SGD\n",
"\n",
"\n",
@@ -50,6 +61,7 @@
" super(Net, self).__init__()\n",
"\n",
" self.cnn_layers = Sequential(\n",
" # Defining a 1D convolution layer\n",
" Conv1d(1, 4, kernel_size=3, stride=1, padding=1),\n",
" BatchNorm1d(4),\n",
@@ -64,6 +76,22 @@
"\n",
" self.linear_layers = Sequential(\n",
" Linear(4 * 50, 10)\n",
" )\n",
"\n",
" # Defining the forward pass\n",
@@ -71,6 +99,7 @@
" x = self.cnn_layers(x)\n",
" x = x.view(x.size(0), -1)\n",
" x = self.linear_layers(x)\n",
" return x\n",
"\n",
"# defining training function\n",
@@ -105,6 +134,9 @@
" tr_loss = loss_train.item()\n",
"\n",
" return loss_train, loss_val"
],
"metadata": {
"collapsed": false,
@@ -127,6 +159,7 @@
},
{
"cell_type": "code",
"execution_count": 69,
"outputs": [
{
@@ -206,6 +239,20 @@
"\n",
"val_x = torch.from_numpy(val_x)\n",
"val_y = torch.from_numpy(val_y)"
],
"metadata": {
"collapsed": false,
@@ -228,6 +275,7 @@
},
{
"cell_type": "code",
"execution_count": 71,
"outputs": [
{
@@ -271,10 +319,22 @@
"# defining the loss function\n",
"criterion = CrossEntropyLoss()\n",
"\n",
"# checking if GPU is available\n",
"if torch.cuda.is_available():\n",
" model = model.cuda()\n",
" criterion = criterion.cuda()\n",
"\n",
"# defining the number of epochs\n",
"n_epochs = 25\n",
@@ -402,6 +462,9 @@
"outputs": [],
"source": [
"torch.save(model.state_dict(), '../models/internal_priming.pth')"
],
"metadata": {
"collapsed": false,