Supeem committed on
Commit
8c48c68
·
1 Parent(s): acd66a9
Files changed (3) hide show
  1. Dockerfile +0 -2
  2. main.py +3 -25
  3. model.py +20 -0
Dockerfile CHANGED
@@ -9,8 +9,6 @@ COPY ./requirements.txt /code/requirements.txt
9
 
10
  RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
11
 
12
- COPY ~/.cache/torch/sentence_transformers /root/.cache/torch/sentence_transformers
13
-
14
  COPY . .
15
 
16
  CMD ["gunicorn", "-b", "0.0.0.0:7860", "main:app"]
 
9
 
10
  RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
11
 
 
 
12
  COPY . .
13
 
14
  CMD ["gunicorn", "-b", "0.0.0.0:7860", "main:app"]
main.py CHANGED
@@ -1,29 +1,7 @@
1
  from flask import Flask, request
2
  import torch
3
- import torch.nn as nn
4
  import numpy as np
5
- from sentence_transformers import SentenceTransformer
6
-
7
- class LSTM(nn.Module):
8
- def __init__(self, embedding_dim, hidden_dim, num_layers, output_dim):
9
- super(LSTM, self).__init__()
10
- self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, num_layers, batch_first=True)
11
- self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True)
12
- self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True)
13
- self.lstm4 = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True)
14
- self.lstm5 = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True)
15
- self.o = nn.Linear(hidden_dim, output_dim)
16
-
17
- def forward(self, embedding):
18
- o_n1, (h_n1, c_n1) = self.lstm1(embedding)
19
- o_n2, (h_n2, c_n2) = self.lstm2(o_n1, (h_n1, c_n1))
20
- o_n3, (h_n3, c_n3) = self.lstm3(o_n2, (h_n2, c_n2))
21
- o_n4, (h_n4, c_n4) = self.lstm4(o_n3, (h_n3, c_n3))
22
- o_n5, (h_n5, c_n5) = self.lstm5(o_n4, (h_n4, c_n4))
23
- output = self.o(o_n5)
24
- return output
25
-
26
- sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
27
 
28
  embedding_dim = 384
29
  hidden_dim = 512
@@ -32,14 +10,14 @@ output_dim = 180
32
  num_epochs = 100
33
  learning_rate = 0.001
34
 
35
- lstm_model = LSTM(embedding_dim, hidden_dim, num_layers, output_dim)
36
  lstm_model.load_state_dict(torch.load('lstm.pt'))
37
 
38
  app = Flask(__name__)
39
 
40
  def GeneratePosesJSON(input):
41
  with torch.no_grad():
42
- processed_text = torch.tensor(sentence_model.encode(input), dtype=torch.float)
43
  output_poses = lstm_model(processed_text.unsqueeze(0))
44
 
45
  people = output_poses.cpu().detach().numpy().reshape(5, 18, 2).tolist()
 
1
  from flask import Flask, request
2
  import torch
3
+ import model
4
  import numpy as np
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  embedding_dim = 384
7
  hidden_dim = 512
 
10
  num_epochs = 100
11
  learning_rate = 0.001
12
 
13
+ lstm_model = model.LSTM(embedding_dim, hidden_dim, num_layers, output_dim)
14
  lstm_model.load_state_dict(torch.load('lstm.pt'))
15
 
16
  app = Flask(__name__)
17
 
18
  def GeneratePosesJSON(input):
19
  with torch.no_grad():
20
+ processed_text = torch.tensor(input, dtype=torch.float)
21
  output_poses = lstm_model(processed_text.unsqueeze(0))
22
 
23
  people = output_poses.cpu().detach().numpy().reshape(5, 18, 2).tolist()
model.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch.nn as nn
2
+
3
class LSTM(nn.Module):
    """Five stacked LSTM layers followed by a linear projection head.

    Each layer's final (hidden, cell) state is passed as the initial state
    of the next layer, and the last layer's full output sequence is mapped
    to ``output_dim`` features per time step.

    NOTE: the attribute names ``lstm1``..``lstm5`` and ``o`` are load-bearing —
    a pre-trained checkpoint is restored via ``load_state_dict`` elsewhere,
    so they must not be renamed or restructured into a ``ModuleList``.
    """

    def __init__(self, embedding_dim, hidden_dim, num_layers, output_dim):
        super().__init__()
        # First layer consumes the embedding; the remaining four are
        # hidden-to-hidden. Construction order matters for seeded RNG
        # reproducibility, so layers are created strictly in sequence.
        self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, num_layers, batch_first=True)
        for idx in range(2, 6):
            layer = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True)
            setattr(self, f"lstm{idx}", layer)
        self.o = nn.Linear(hidden_dim, output_dim)

    def forward(self, embedding):
        # Thread both the output sequence and the (h, c) state pair through
        # the stack: layer k+1 is primed with layer k's final state.
        seq, state = self.lstm1(embedding)
        for idx in range(2, 6):
            seq, state = getattr(self, f"lstm{idx}")(seq, state)
        return self.o(seq)