# Streamlit demo app for Stockmark-LLM-100b.
# (The "Spaces: Runtime error" lines here were a Hugging Face Spaces status
# banner captured by extraction, not part of the source.)
import json
import os

import openai
import requests

import streamlit as st

# Endpoint and credentials for the OpenAI-compatible chat server.
# Both are required, so fail fast (KeyError) at startup if missing.
CHATBOT_ENDPOINT = os.environ["CHATBOT_ENDPOINT"]
TOKEN = os.environ["TOKEN"]

# Maintenance flag. Environment variables are always strings, so coerce to
# int: the previous truthiness check treated MAINTENANCE="0" as enabled.
MAINTENANCE = int(os.environ.get("MAINTENANCE", 0))

# Client for the OpenAI-compatible inference endpoint (60 s request timeout).
client = openai.OpenAI(
    base_url=CHATBOT_ENDPOINT,
    api_key=TOKEN,
    timeout=60,
)
def generate(prompt):
    """Stream the model's response to *prompt*, yielding text chunks.

    On any failure (network error, bad response, missing client) a single
    Japanese apology message is yielded instead of raising, so the Streamlit
    UI degrades gracefully rather than crashing the page.
    """
    try:
        completion = client.chat.completions.create(
            model="stockmark/stockmark-100b-instruct-merged-v0.1",
            messages=[{"role": "user", "content": prompt}],
            extra_body={"repetition_penalty": 1.05},
            max_tokens=256,
            temperature=0.5,
            top_p=0.95,
            stream=True,
        )
        for chunk in completion:
            text = chunk.choices[0].delta.content
            if text:
                if text == "\n":
                    # NOTE(review): Markdown needs two trailing spaces before
                    # "\n" for a hard line break; this literal may have been
                    # whitespace-collapsed by extraction — verify upstream.
                    text = " \n"
                yield text
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed.
        yield "<<予期せぬエラーが発生しております。しばらくしてからアクセスください。>>"
# Introductory blurb shown at the top of the demo tab.
intro = """This is a demo site for Stockmark-LLM-100b. This service is running on AWS Inferentia2. Currently, the response is sometimes slow due to many requests to the server.
- Pretrained model: [stockmark/stockmark-100b](https://huggingface.co/stockmark/stockmark-100b)
- Instruction tuned model: [stockmark/stockmark-100b-instruct-v0.1](https://huggingface.co/stockmark/stockmark-100b-instruct-v0.1)
"""

# Usage caveats shown on the "Disclaimer" tab.
# Fixed possessive: "users chat data" -> "users' chat data".
disclaimer = """
- Responses of our LLM may be incorrect, biased, or harmful.
- We may use users' chat data in this demo to improve our LLM.
"""
# End-of-life / maintenance gate: when MAINTENANCE is set, render only the
# closing notice with model download links and stop the script here so the
# interactive demo below never runs.
if MAINTENANCE:
    st.title("Stockmark-LLM-100b")
    # Earlier maintenance banner, kept for reference:
    # st.markdown("ただいまメンテナンス中です。申し訳ありませんが、しばらくしてからアクセスしてください。")
    st.markdown("このデモサイトは公開を終了しました。stockmark-100bのモデルは以下のレポジトリからダウンロードできます。モデルの実行方法に関しても該当のレポジトリをご参考にしてください。")
    st.markdown("- 事前学習モデル: [stockmark/stockmark-100b](https://huggingface.co/stockmark/stockmark-100b)")
    st.markdown("- 指示学習モデル: [stockmark/stockmark-100b-instruct-v0.1](https://huggingface.co/stockmark/stockmark-100b-instruct-v0.1)")
    st.stop()
# Main UI: a "Demo" tab with a single prompt/response exchange held in
# st.session_state across Streamlit reruns, and a "Disclaimer" tab.
tab1, tab2 = st.tabs(["Demo", "Disclaimer"])

with tab1:
    st.title("Stockmark-LLM-100b")
    st.markdown(intro)

    # One prompt/response pair persists in session_state between reruns.
    prompt = st.session_state.get("prompt", "")
    response = st.session_state.get("response", "")

    if prompt == "" or response:
        # Fresh session, or the previous exchange finished: accept new input.
        print("new_session")  # server-side log marker
        prompt_new = st.text_area("Prompt:")
        if prompt_new:
            st.session_state["prompt"] = prompt_new
            st.session_state["response"] = ""
            st.rerun()
    else:
        # A generation is in flight: show the prompt read-only.
        prompt = st.text_area("Prompt:", value=prompt, disabled=True)

    if prompt:
        if response:
            # Completed answer from a previous run — just redisplay it.
            with st.chat_message("assistant"):
                st.write(response)
        else:
            # Stream a new answer, persist it, then rerun so the input
            # box is re-enabled for the next prompt.
            # NOTE(review): indentation reconstructed from a flattened
            # source — the session_state update is assumed to follow the
            # chat_message block; confirm against the original file.
            with st.chat_message("assistant"):
                response = st.write_stream(generate(prompt))
            st.session_state["response"] = response
            st.rerun()

with tab2:
    st.title("Stockmark-LLM-100b: Disclaimer")
    st.markdown(disclaimer)