Update app.py
app.py CHANGED
@@ -36,13 +36,10 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 # Define prompt structure (update if necessary for your model)
 alpaca_prompt = "{instruction} {input} {output}"
 
-instruction = "Chat with me like Chandler"
-
-
 @spaces.GPU # Use GPU provided by Hugging Face Spaces if available
 def generate_response(user_input, chat_history):
-    instruction =
-    input_text = user_input #
+    instruction = "Chat with me like Chandler talks."
+    input_text = user_input  # Treats user input as the input
 
     # Prepare inputs for model inference on the correct device
     inputs = tokenizer(
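For context, a minimal sketch of how the edited generate_response might fit into the rest of app.py, assuming the model and tokenizer are loaded with transformers earlier in the file. The model name, generation parameters, and the decoding step below are illustrative assumptions, not taken from this commit.

import torch
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder model id; the repo's actual fine-tuned model name is not shown in this diff
model_name = "your-username/your-finetuned-model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Define prompt structure (update if necessary for your model)
alpaca_prompt = "{instruction} {input} {output}"

@spaces.GPU  # Use GPU provided by Hugging Face Spaces if available
def generate_response(user_input, chat_history):
    instruction = "Chat with me like Chandler talks."
    input_text = user_input  # Treats user input as the input
    # chat_history is unused in this sketch; the original app may fold it into the prompt

    # Fill the prompt template, leaving the output slot empty for the model to complete
    prompt = alpaca_prompt.format(instruction=instruction, input=input_text, output="")

    # Prepare inputs for model inference on the correct device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Generation settings here are assumed defaults, not values from the commit
    outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7)

    # Strip the prompt tokens and decode only the newly generated text
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )
    return response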