Update app.py
app.py
CHANGED
@@ -1,10 +1,9 @@
 import torch
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-
 model_name = 'ibm/qcpg-sentences'
 torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-tokenizer = AutoTokenizer.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("ibm/qcpg-sentences")
 model = AutoModelForSeq2SeqLM.from_pretrained("ibm/qcpg-sentences")
 def get_response(input_text,num_return_sequences):
     batch = tokenizer.prepare_seq2seq_batch([input_text],truncation=True,padding='longest',max_length=100, return_tensors="pt").to(torch_device)
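The hunk ends mid-function, so only the first line of get_response appears in the commit. For reference, below is a minimal sketch of how such a seq2seq paraphrase helper is typically completed for ibm/qcpg-sentences. It is not part of the committed file: the generation settings (num_beams, max_length), the .to(torch_device) on the model, and the use of the standard tokenizer(...) call in place of the deprecated prepare_seq2seq_batch are all assumptions.

# Hypothetical continuation of app.py; NOT the committed code above.
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "ibm/qcpg-sentences"
torch_device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
# Moving the model to the same device as the batch is assumed here.
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(torch_device)

def get_response(input_text, num_return_sequences):
    # Tokenize one sentence; truncation/padding mirror the committed call,
    # but tokenizer(...) replaces the deprecated prepare_seq2seq_batch.
    batch = tokenizer(
        [input_text],
        truncation=True,
        padding="longest",
        max_length=100,
        return_tensors="pt",
    ).to(torch_device)
    # Beam search returning several candidate paraphrases (settings are assumptions).
    generated = model.generate(
        **batch,
        max_length=100,
        num_beams=max(4, num_return_sequences),
        num_return_sequences=num_return_sequences,
    )
    return tokenizer.batch_decode(generated, skip_special_tokens=True)

# Illustrative call:
# print(get_response("The weather is nice today.", 3))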