Commit 982b09c7 authored by TheRiPtide

chore: notebooks doing weird stuff when rebasing

parent 8481b753
1 merge request: !23 feat: deep-learning poly(A) classifier
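Reviewer note: the diff below shows that the notebook was committed with its merge-conflict markers (`<<<<<<< HEAD` / `=======` / `>>>>>>> d2ef840`) still in place, which is the "weird stuff" the commit message alludes to. If the HEAD side (the 1D CNN) is the intended one, a throwaway Python sketch like the following could strip the conflict blocks; the notebook path is hypothetical, and a content-aware merge tool such as nbdime is the more robust fix for future rebases.

    from pathlib import Path

    def keep_head_side(path):
        """Drop merge-conflict markers, keeping only the HEAD side of each block."""
        out, keep = [], True
        for line in Path(path).read_text().splitlines(keepends=True):
            if line.startswith("<<<<<<<"):
                keep = True    # HEAD side starts; drop the marker itself
            elif line.startswith("======="):
                keep = False   # other side starts; skip until the end marker
            elif line.startswith(">>>>>>>"):
                keep = True    # conflict block ends
            elif keep:
                out.append(line)
        Path(path).write_text("".join(out))

    keep_head_side("notebooks/cnn.ipynb")  # hypothetical path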
@@ -22,13 +22,20 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 80,
=======
"execution_count": null,
>>>>>>> d2ef840 (chore: started cnn notebook)
"outputs": [], "outputs": [],
"source": [ "source": [
"# importing the libraries\n", "# importing the libraries\n",
"import pandas as pd\n", "import pandas as pd\n",
"import numpy as np\n", "import numpy as np\n",
<<<<<<< HEAD
"import matplotlib.pyplot as plt\n", "import matplotlib.pyplot as plt\n",
=======
>>>>>>> d2ef840 (chore: started cnn notebook)
"\n", "\n",
"# for creating validation set\n", "# for creating validation set\n",
"from sklearn.model_selection import train_test_split\n", "from sklearn.model_selection import train_test_split\n",
...@@ -40,8 +47,13 @@ ...@@ -40,8 +47,13 @@
"# PyTorch libraries and modules\n", "# PyTorch libraries and modules\n",
"import torch\n", "import torch\n",
"from torch.autograd import Variable\n", "from torch.autograd import Variable\n",
<<<<<<< HEAD
"from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, MaxPool1d, Module, Softmax, BatchNorm1d, Dropout, Conv1d\n", "from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, MaxPool1d, Module, Softmax, BatchNorm1d, Dropout, Conv1d\n",
"from torch.optim import Adam\n", "from torch.optim import Adam\n",
=======
"from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout\n",
"from torch.optim import Adam, SGD\n",
>>>>>>> d2ef840 (chore: started cnn notebook)
"\n", "\n",
"\n", "\n",
"# adding the nn\n", "# adding the nn\n",
...@@ -50,6 +62,7 @@ ...@@ -50,6 +62,7 @@
" super(Net, self).__init__()\n", " super(Net, self).__init__()\n",
"\n", "\n",
" self.cnn_layers = Sequential(\n", " self.cnn_layers = Sequential(\n",
<<<<<<< HEAD
" # Defining a 1D convolution layer\n", " # Defining a 1D convolution layer\n",
" Conv1d(1, 4, kernel_size=3, stride=1, padding=1),\n", " Conv1d(1, 4, kernel_size=3, stride=1, padding=1),\n",
" BatchNorm1d(4),\n", " BatchNorm1d(4),\n",
@@ -64,6 +77,22 @@
"\n",
" self.linear_layers = Sequential(\n",
" Linear(4 * 50, 10)\n",
=======
" # Defining a 2D convolution layer\n",
" Conv2d(1, 4, kernel_size=3, stride=1, padding=1),\n",
" BatchNorm2d(4),\n",
" ReLU(inplace=True),\n",
" MaxPool2d(kernel_size=2, stride=2),\n",
" # Defining another 2D convolution layer\n",
" Conv2d(4, 4, kernel_size=3, stride=1, padding=1),\n",
" BatchNorm2d(4),\n",
" ReLU(inplace=True),\n",
" MaxPool2d(kernel_size=2, stride=2),\n",
" )\n",
"\n",
" self.linear_layers = Sequential(\n",
" Linear(4 * 7 * 7, 10)\n",
>>>>>>> d2ef840 (chore: started cnn notebook)
" )\n", " )\n",
"\n", "\n",
" # Defining the forward pass\n", " # Defining the forward pass\n",
@@ -71,6 +100,7 @@
" x = self.cnn_layers(x)\n",
" x = x.view(x.size(0), -1)\n",
" x = self.linear_layers(x)\n",
<<<<<<< HEAD
" return x\n", " return x\n",
"\n", "\n",
"# defining training function\n", "# defining training function\n",
...@@ -105,6 +135,9 @@ ...@@ -105,6 +135,9 @@
" tr_loss = loss_train.item()\n", " tr_loss = loss_train.item()\n",
"\n", "\n",
" return loss_train, loss_val" " return loss_train, loss_val"
=======
" return x"
>>>>>>> d2ef840 (chore: started cnn notebook)
],
"metadata": {
"collapsed": false,
@@ -127,6 +160,7 @@
},
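Reviewer note: assembled as plain Python, the HEAD side of the cell above resolves to the model below. The second convolution block sits inside the collapsed hunk (@@ -64,6 +77,22 @@), so it is assumed here to mirror the 2D branch of the conflict (same channel counts, kernel_size=2/stride=2 pooling); Linear(4 * 50, 10) then implies an input length of 200 (two pooling halvings: 200 → 100 → 50). A sketch under those assumptions, not the committed cell verbatim:

    import torch
    from torch.nn import (Linear, ReLU, Sequential, MaxPool1d,
                          Module, BatchNorm1d, Conv1d)

    class Net(Module):
        def __init__(self):
            super(Net, self).__init__()
            self.cnn_layers = Sequential(
                # First 1D convolution block (visible in the HEAD side of the diff)
                Conv1d(1, 4, kernel_size=3, stride=1, padding=1),
                BatchNorm1d(4),
                ReLU(inplace=True),
                MaxPool1d(kernel_size=2, stride=2),  # pooling params assumed
                # Second block: hidden in the collapsed hunk, assumed to
                # mirror the 2D branch of the conflict
                Conv1d(4, 4, kernel_size=3, stride=1, padding=1),
                BatchNorm1d(4),
                ReLU(inplace=True),
                MaxPool1d(kernel_size=2, stride=2),
            )
            # 4 channels * 50 positions -> 10 classes, as in the diff
            self.linear_layers = Sequential(Linear(4 * 50, 10))

        # Defining the forward pass (matches the visible lines)
        def forward(self, x):
            x = self.cnn_layers(x)
            x = x.view(x.size(0), -1)
            x = self.linear_layers(x)
            return x

    # Quick shape check with a dummy batch: (n, 1, 200) -> (n, 10)
    print(Net()(torch.randn(8, 1, 200)).shape)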
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 81, "execution_count": 81,
"outputs": [ "outputs": [
{ {
...@@ -206,6 +240,20 @@ ...@@ -206,6 +240,20 @@
"\n", "\n",
"val_x = torch.from_numpy(val_x)\n", "val_x = torch.from_numpy(val_x)\n",
"val_y = torch.from_numpy(val_y)" "val_y = torch.from_numpy(val_y)"
=======
"execution_count": null,
"outputs": [],
"source": [
"# TODO: Get test data from issues 25 and 26\n",
"train_x = []\n",
"train_y = []\n",
"test_x = []\n",
"test_y = []\n",
"\n",
"train_x, val_x, train_y, val_y = train_test_split(train_x, train_y, test_size = 0.1)\n",
"\n",
"# TODO: reshape shape from [n, l] to [n, 1, l]\n"
>>>>>>> d2ef840 (chore: started cnn notebook)
],
"metadata": {
"collapsed": false,
@@ -228,6 +276,7 @@
},
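Reviewer note: the d2ef840 side of this cell leaves two TODOs (real data from issues 25 and 26, and reshaping [n, l] to [n, 1, l]). The reshape adds the channel dimension Conv1d expects. A minimal sketch with random arrays standing in for the real sequences (the shapes are assumptions):

    import numpy as np
    import torch
    from sklearn.model_selection import train_test_split

    # Random stand-ins for the data from issues 25 and 26 (shapes assumed)
    train_x = np.random.rand(1000, 200).astype(np.float32)  # [n, l]
    train_y = np.random.randint(0, 2, size=1000)            # integer class labels

    train_x, val_x, train_y, val_y = train_test_split(train_x, train_y, test_size=0.1)

    # Reshape [n, l] -> [n, 1, l]: Conv1d expects (batch, channels, length)
    train_x = torch.from_numpy(train_x).unsqueeze(1)
    val_x = torch.from_numpy(val_x).unsqueeze(1)
    train_y = torch.from_numpy(train_y)
    val_y = torch.from_numpy(val_y)

    print(train_x.shape)  # torch.Size([900, 1, 200])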
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 83, "execution_count": 83,
"outputs": [ "outputs": [
{ {
...@@ -248,10 +297,22 @@ ...@@ -248,10 +297,22 @@
"# defining the loss function\n", "# defining the loss function\n",
"criterion = CrossEntropyLoss()\n", "criterion = CrossEntropyLoss()\n",
"\n", "\n",
=======
"execution_count": null,
"outputs": [],
"source": [
"# defining the model\n",
"model = Net()\n",
"# defining the optimizer\n",
"optimizer = Adam(model.parameters(), lr=0.07)\n",
"# defining the loss function\n",
"criterion = CrossEntropyLoss()\n",
>>>>>>> d2ef840 (chore: started cnn notebook)
"# checking if GPU is available\n", "# checking if GPU is available\n",
"if torch.cuda.is_available():\n", "if torch.cuda.is_available():\n",
" model = model.cuda()\n", " model = model.cuda()\n",
" criterion = criterion.cuda()\n", " criterion = criterion.cuda()\n",
<<<<<<< HEAD
"\n", "\n",
"# defining the number of epochs\n", "# defining the number of epochs\n",
"n_epochs = 25\n", "n_epochs = 25\n",
...@@ -379,6 +440,9 @@ ...@@ -379,6 +440,9 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"torch.save(model.state_dict(), '../models/internal_priming.pth')" "torch.save(model.state_dict(), '../models/internal_priming.pth')"
=======
"\n"
>>>>>>> d2ef840 (chore: started cnn notebook)
],
"metadata": {
"collapsed": false,
...
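Reviewer note: the body of the train() function and most of the training loop are collapsed in this diff, so the sketch below only stitches the visible fragments together (Adam with lr=0.07, CrossEntropyLoss, the CUDA check, n_epochs = 25, the tr_loss = loss_train.item() line, and the final torch.save). It reuses Net and the tensors from the sketches above; the loop structure itself is a guess, not the committed code.

    # Assumes Net, train_x/train_y, val_x/val_y from the sketches above.
    import torch
    from torch.nn import CrossEntropyLoss
    from torch.optim import Adam

    model = Net()
    optimizer = Adam(model.parameters(), lr=0.07)
    criterion = CrossEntropyLoss()

    # checking if GPU is available (as in the cell above)
    if torch.cuda.is_available():
        model = model.cuda()
        criterion = criterion.cuda()

    def train(epoch):
        """One full-batch training step; returns train and validation losses."""
        model.train()
        x, y, vx, vy = train_x, train_y, val_x, val_y
        if torch.cuda.is_available():
            x, y, vx, vy = x.cuda(), y.cuda(), vx.cuda(), vy.cuda()
        optimizer.zero_grad()
        loss_train = criterion(model(x), y)
        loss_train.backward()
        optimizer.step()
        with torch.no_grad():
            loss_val = criterion(model(vx), vy)
        tr_loss = loss_train.item()  # visible in the collapsed hunk
        return loss_train, loss_val

    n_epochs = 25
    for epoch in range(n_epochs):
        loss_train, loss_val = train(epoch)

    torch.save(model.state_dict(), '../models/internal_priming.pth')
    # To reload: model.load_state_dict(torch.load('../models/internal_priming.pth'))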