{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.10.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[],"dockerImageVersionId":30839,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# 0. How to use\n- Go to the \"Inference session\"\n- Execute the classifier object: text, emotional, speech-to-text\n- Make sure you prepare the correct library/framework/environment stuff ...\n \n","metadata":{}},{"cell_type":"markdown","source":"# TEXT CLASSIFCATION\n- Fine-tuning\n- Inference","metadata":{}},{"cell_type":"markdown","source":"# 1. Data preprocessing","metadata":{}},{"cell_type":"code","source":"import pandas as pd\nimport re","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:16.370148Z","iopub.execute_input":"2025-10-07T02:32:16.370423Z","iopub.status.idle":"2025-10-07T02:32:17.298932Z","shell.execute_reply.started":"2025-10-07T02:32:16.370400Z","shell.execute_reply":"2025-10-07T02:32:17.297981Z"}},"outputs":[],"execution_count":1},{"cell_type":"code","source":"def clean_and_tokenize(text: str) -> list:\n text = text.lower()\n text = re.sub(r'[^\\w\\s-]', '', text)\n text = re.sub(r'\\s+', ' ', text).strip()\n return text.split()\n\ndef preprocess_df(df: pd.DataFrame, text_col: str) -> pd.DataFrame:\n df[\"tokens\"] = df[text_col].apply(clean_and_tokenize)\n df[\"processed_text\"] = df[\"tokens\"].apply(lambda tokens: \" \".join(tokens))\n return df","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.299858Z","iopub.execute_input":"2025-10-07T02:32:17.300269Z","iopub.status.idle":"2025-10-07T02:32:17.305274Z","shell.execute_reply.started":"2025-10-07T02:32:17.300246Z","shell.execute_reply":"2025-10-07T02:32:17.304417Z"}},"outputs":[],"execution_count":2},{"cell_type":"code","source":"# Read dataset\nthreat = pd.read_csv('/PATH/TO/THE/THREAT_CSV')\nnon_threat = pd.read_csv('/PATH/TO/THE/NOT_THREAT_CSV')\n\n# Preprocess dataset\nthreat = preprocess_df(threat, 'sentence')\nnon_threat = preprocess_df(non_threat, 'sentence')\n\n# drop a column name tokens\nthreat.drop('tokens', axis=1)\nnon_threat.drop('tokens', axis=1)\ndf = pd.concat([threat, non_threat], axis=0)\n\n# mapping dataset with string label\ndf[\"label\"] = df[\"label\"].map({0: \"non-threat\", 1: \"threat\"})\n\n# print information\ndf","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.306161Z","iopub.execute_input":"2025-10-07T02:32:17.306513Z","iopub.status.idle":"2025-10-07T02:32:17.437993Z","shell.execute_reply.started":"2025-10-07T02:32:17.306482Z","shell.execute_reply":"2025-10-07T02:32:17.434723Z"}},"outputs":[{"traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)","\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# Read dataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mthreat\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'/PATH/TO/THE/THREAT_CSV'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0mnon_threat\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'/PATH/TO/THE/NOT_THREAT_CSV'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;31m# Preprocess dataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.10/dist-packages/pandas/io/parsers/readers.py\u001b[0m in \u001b[0;36mread_csv\u001b[0;34m(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, date_format, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, encoding_errors, dialect, on_bad_lines, delim_whitespace, low_memory, memory_map, float_precision, storage_options, dtype_backend)\u001b[0m\n\u001b[1;32m 1024\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkwds_defaults\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1025\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1026\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1027\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1028\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.10/dist-packages/pandas/io/parsers/readers.py\u001b[0m in \u001b[0;36m_read\u001b[0;34m(filepath_or_buffer, kwds)\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 619\u001b[0m \u001b[0;31m# Create the parser.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 620\u001b[0;31m \u001b[0mparser\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTextFileReader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 621\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 622\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mchunksize\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0miterator\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.10/dist-packages/pandas/io/parsers/readers.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, f, engine, **kwds)\u001b[0m\n\u001b[1;32m 1618\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1619\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhandles\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mIOHandles\u001b[0m \u001b[0;34m|\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1620\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_engine\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_make_engine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mengine\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1621\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1622\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.10/dist-packages/pandas/io/parsers/readers.py\u001b[0m in \u001b[0;36m_make_engine\u001b[0;34m(self, f, engine)\u001b[0m\n\u001b[1;32m 1878\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;34m\"b\"\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1879\u001b[0m \u001b[0mmode\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;34m\"b\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1880\u001b[0;31m self.handles = get_handle(\n\u001b[0m\u001b[1;32m 1881\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1882\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.10/dist-packages/pandas/io/common.py\u001b[0m in \u001b[0;36mget_handle\u001b[0;34m(path_or_buf, mode, encoding, compression, memory_map, is_text, errors, storage_options)\u001b[0m\n\u001b[1;32m 871\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mioargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mencoding\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;34m\"b\"\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mioargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 872\u001b[0m \u001b[0;31m# Encoding\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 873\u001b[0;31m handle = open(\n\u001b[0m\u001b[1;32m 874\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 875\u001b[0m \u001b[0mioargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/PATH/TO/THE/THREAT_CSV'"],"ename":"FileNotFoundError","evalue":"[Errno 2] No such file or directory: '/PATH/TO/THE/THREAT_CSV'","output_type":"error"}],"execution_count":3},{"cell_type":"code","source":"# shuffle the df and split it into 8/2\nfrom sklearn.model_selection import train_test_split\ndf = df.sample(frac=1, random_state=42).reset_index(drop=True)\n\ntrain_df, test_df = train_test_split(df, test_size=0.1, random_state=42, shuffle=True)\ntrain_df.to_csv('train_data.csv', index=False)\ntest_df.to_csv('test_data.csv', index=False)\n\n# analyst dataset information\ntest_label_count = test_df['label'].value_counts()\ntrain_label_count = train_df['label'].value_counts()\nprint('Test count: ', test_label_count)\nprint('Train count: ', train_label_count)\n\n# save dataset\nVAL_DIR = '/kaggle/working/test_data.csv'\nTRAIN_DIR = '/kaggle/working/train_data.csv'\ntest_df.to_csv(VAL_DIR)\ntrain_df.to_csv(TRAIN_DIR)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.438665Z","iopub.status.idle":"2025-10-07T02:32:17.439045Z","shell.execute_reply":"2025-10-07T02:32:17.438883Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# 2. Fine-tuning","metadata":{}},{"cell_type":"code","source":"!pip install --upgrade pip\n!pip install mediapipe-model-maker\n\nimport os\nimport tensorflow as tf\nassert tf.__version__.startswith('2')\n\nfrom mediapipe_model_maker import text_classifier\nfrom mediapipe_model_maker import model_util\nfrom mediapipe_model_maker import quantization","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:36:28.892828Z","iopub.execute_input":"2025-10-07T02:36:28.893111Z","iopub.status.idle":"2025-10-07T02:38:41.758758Z","shell.execute_reply.started":"2025-10-07T02:36:28.893083Z","shell.execute_reply":"2025-10-07T02:38:41.757845Z"}},"outputs":[{"name":"stdout","text":"Requirement already satisfied: pip in /usr/local/lib/python3.10/dist-packages (24.1.2)\nCollecting pip\n Downloading pip-25.2-py3-none-any.whl.metadata (4.7 kB)\nDownloading pip-25.2-py3-none-any.whl (1.8 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m12.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hInstalling collected packages: pip\n Attempting uninstall: pip\n Found existing installation: pip 24.1.2\n Uninstalling pip-24.1.2:\n Successfully uninstalled pip-24.1.2\nSuccessfully installed pip-25.2\nCollecting mediapipe-model-maker\n Downloading mediapipe_model_maker-0.2.1.4-py3-none-any.whl.metadata (1.7 kB)\nRequirement already satisfied: absl-py in /usr/local/lib/python3.10/dist-packages (from mediapipe-model-maker) (1.4.0)\nCollecting mediapipe>=0.10.0 (from mediapipe-model-maker)\n Downloading mediapipe-0.10.21-cp310-cp310-manylinux_2_28_x86_64.whl.metadata (9.7 kB)\nRequirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from mediapipe-model-maker) (1.26.4)\nRequirement already satisfied: opencv-python in /usr/local/lib/python3.10/dist-packages (from mediapipe-model-maker) (4.10.0.84)\nCollecting tensorflow<2.16,>=2.10 (from mediapipe-model-maker)\n Downloading tensorflow-2.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.2 kB)\nCollecting tensorflow-addons (from mediapipe-model-maker)\n Downloading tensorflow_addons-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.8 kB)\nRequirement already satisfied: tensorflow-datasets in /usr/local/lib/python3.10/dist-packages (from mediapipe-model-maker) (4.9.7)\nRequirement already satisfied: tensorflow-hub in /usr/local/lib/python3.10/dist-packages (from mediapipe-model-maker) (0.16.1)\nCollecting tensorflow-model-optimization<0.8.0 (from mediapipe-model-maker)\n Downloading tensorflow_model_optimization-0.7.5-py2.py3-none-any.whl.metadata (914 bytes)\nRequirement already satisfied: tensorflow-text in /usr/local/lib/python3.10/dist-packages (from mediapipe-model-maker) (2.17.0)\nCollecting tf-models-official<2.16.0,>=2.13.2 (from mediapipe-model-maker)\n Downloading tf_models_official-2.15.0-py2.py3-none-any.whl.metadata (1.4 kB)\nRequirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (1.6.3)\nRequirement already satisfied: flatbuffers>=23.5.26 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (24.3.25)\nRequirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (0.6.0)\nRequirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (0.2.0)\nRequirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (3.12.1)\nRequirement already satisfied: libclang>=13.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (18.1.1)\nCollecting ml-dtypes~=0.3.1 (from tensorflow<2.16,>=2.10->mediapipe-model-maker)\n Downloading ml_dtypes-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (20 kB)\nRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (3.4.0)\nRequirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (24.2)\nRequirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (3.20.3)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (75.1.0)\nRequirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (1.17.0)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (2.5.0)\nRequirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (4.12.2)\nCollecting wrapt<1.15,>=1.11.0 (from tensorflow<2.16,>=2.10->mediapipe-model-maker)\n Downloading wrapt-1.14.2-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.5 kB)\nRequirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (0.37.1)\nRequirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.16,>=2.10->mediapipe-model-maker) (1.68.1)\nCollecting tensorboard<2.16,>=2.15 (from tensorflow<2.16,>=2.10->mediapipe-model-maker)\n Downloading tensorboard-2.15.2-py3-none-any.whl.metadata (1.7 kB)\nCollecting tensorflow-estimator<2.16,>=2.15.0 (from tensorflow<2.16,>=2.10->mediapipe-model-maker)\n Downloading tensorflow_estimator-2.15.0-py2.py3-none-any.whl.metadata (1.3 kB)\nCollecting keras<2.16,>=2.15.0 (from tensorflow<2.16,>=2.10->mediapipe-model-maker)\n Downloading keras-2.15.0-py3-none-any.whl.metadata (2.4 kB)\nRequirement already satisfied: mkl_fft in /usr/local/lib/python3.10/dist-packages (from numpy->mediapipe-model-maker) (1.3.8)\nRequirement already satisfied: mkl_random in /usr/local/lib/python3.10/dist-packages (from numpy->mediapipe-model-maker) (1.2.4)\nRequirement already satisfied: mkl_umath in /usr/local/lib/python3.10/dist-packages (from numpy->mediapipe-model-maker) (0.1.1)\nRequirement already satisfied: mkl in /usr/local/lib/python3.10/dist-packages (from numpy->mediapipe-model-maker) (2025.0.1)\nRequirement already satisfied: tbb4py in /usr/local/lib/python3.10/dist-packages (from numpy->mediapipe-model-maker) (2022.0.0)\nRequirement already satisfied: mkl-service in /usr/local/lib/python3.10/dist-packages (from numpy->mediapipe-model-maker) (2.4.1)\nRequirement already satisfied: google-auth<3,>=1.6.3 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (2.27.0)\nRequirement already satisfied: google-auth-oauthlib<2,>=0.5 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (1.2.1)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (3.7)\nRequirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (2.32.3)\nRequirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (0.7.2)\nRequirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (3.1.3)\nRequirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (5.5.0)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (0.4.1)\nRequirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (4.9)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from google-auth-oauthlib<2,>=0.5->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (1.3.1)\nRequirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (3.4.0)\nRequirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (3.10)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (2.2.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (2024.12.14)\nRequirement already satisfied: pyasn1>=0.1.3 in /usr/local/lib/python3.10/dist-packages (from rsa<5,>=3.1.4->google-auth<3,>=1.6.3->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (0.6.1)\nRequirement already satisfied: dm-tree~=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow-model-optimization<0.8.0->mediapipe-model-maker) (0.1.8)\nRequirement already satisfied: Cython in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (3.0.11)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (11.0.0)\nRequirement already satisfied: gin-config in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (0.5.0)\nRequirement already satisfied: google-api-python-client>=1.6.7 in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (2.155.0)\nRequirement already satisfied: immutabledict in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (4.2.1)\nRequirement already satisfied: kaggle>=1.3.9 in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (1.6.17)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (3.7.5)\nRequirement already satisfied: oauth2client in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (4.1.3)\nRequirement already satisfied: opencv-python-headless in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (4.10.0.84)\nRequirement already satisfied: pandas>=0.22.0 in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (2.2.2)\nRequirement already satisfied: psutil>=5.4.3 in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (5.9.5)\nRequirement already satisfied: py-cpuinfo>=3.3.0 in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (9.0.0)\nRequirement already satisfied: pycocotools in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (2.0.8)\nRequirement already satisfied: pyyaml>=6.0.0 in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (6.0.2)\nCollecting sacrebleu (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker)\n Downloading sacrebleu-2.5.1-py3-none-any.whl.metadata (51 kB)\nRequirement already satisfied: scipy>=0.19.1 in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (1.13.1)\nRequirement already satisfied: sentencepiece in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (0.2.0)\nCollecting seqeval (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker)\n Downloading seqeval-1.2.2.tar.gz (43 kB)\n Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\nCollecting tensorflow-text (from mediapipe-model-maker)\n Downloading tensorflow_text-2.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.9 kB)\nRequirement already satisfied: tf-slim>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (1.1.0)\nRequirement already satisfied: wheel<1.0,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from astunparse>=1.6.0->tensorflow<2.16,>=2.10->mediapipe-model-maker) (0.45.1)\nRequirement already satisfied: httplib2<1.dev0,>=0.19.0 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (0.22.0)\nRequirement already satisfied: google-auth-httplib2<1.0.0,>=0.2.0 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (0.2.0)\nRequirement already satisfied: google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (1.34.1)\nRequirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (4.1.1)\nRequirement already satisfied: googleapis-common-protos<2.0dev,>=1.56.2 in /usr/local/lib/python3.10/dist-packages (from google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5->google-api-python-client>=1.6.7->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (1.66.0)\nRequirement already satisfied: pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2 in /usr/local/lib/python3.10/dist-packages (from httplib2<1.dev0,>=0.19.0->google-api-python-client>=1.6.7->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (3.2.0)\nRequirement already satisfied: python-dateutil in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (2.8.2)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (4.67.1)\nRequirement already satisfied: python-slugify in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (8.0.4)\nRequirement already satisfied: bleach in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (6.2.0)\nRequirement already satisfied: attrs>=19.1.0 in /usr/local/lib/python3.10/dist-packages (from mediapipe>=0.10.0->mediapipe-model-maker) (24.3.0)\nRequirement already satisfied: jax in /usr/local/lib/python3.10/dist-packages (from mediapipe>=0.10.0->mediapipe-model-maker) (0.4.33)\nRequirement already satisfied: jaxlib in /usr/local/lib/python3.10/dist-packages (from mediapipe>=0.10.0->mediapipe-model-maker) (0.4.33)\nRequirement already satisfied: opencv-contrib-python in /usr/local/lib/python3.10/dist-packages (from mediapipe>=0.10.0->mediapipe-model-maker) (4.10.0.84)\nINFO: pip is looking at multiple versions of mediapipe to determine which version is compatible with other requirements. This could take a while.\nCollecting mediapipe>=0.10.0 (from mediapipe-model-maker)\n Downloading mediapipe-0.10.20-cp310-cp310-manylinux_2_28_x86_64.whl.metadata (9.7 kB)\n Downloading mediapipe-0.10.18-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (9.7 kB)\n Downloading mediapipe-0.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (9.7 kB)\n Downloading mediapipe-0.10.14-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (9.7 kB)\n Downloading mediapipe-0.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (9.7 kB)\n Downloading mediapipe-0.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (9.7 kB)\nRequirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from mediapipe>=0.10.0->mediapipe-model-maker) (2.5.1+cu121)\nCollecting sounddevice>=0.4.4 (from mediapipe>=0.10.0->mediapipe-model-maker)\n Downloading sounddevice-0.5.2-py3-none-any.whl.metadata (1.6 kB)\nRequirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=0.22.0->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (2024.2)\nRequirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.10/dist-packages (from pandas>=0.22.0->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (2024.2)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<2,>=0.5->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (3.2.2)\nRequirement already satisfied: CFFI>=1.0 in /usr/local/lib/python3.10/dist-packages (from sounddevice>=0.4.4->mediapipe>=0.10.0->mediapipe-model-maker) (1.17.1)\nRequirement already satisfied: pycparser in /usr/local/lib/python3.10/dist-packages (from CFFI>=1.0->sounddevice>=0.4.4->mediapipe>=0.10.0->mediapipe-model-maker) (2.22)\nRequirement already satisfied: tf-keras>=2.14.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow-hub->mediapipe-model-maker) (2.17.0)\nINFO: pip is looking at multiple versions of tf-keras to determine which version is compatible with other requirements. This could take a while.\nCollecting tf-keras>=2.14.1 (from tensorflow-hub->mediapipe-model-maker)\n Downloading tf_keras-2.20.1-py3-none-any.whl.metadata (1.8 kB)\n Downloading tf_keras-2.19.0-py3-none-any.whl.metadata (1.8 kB)\n Downloading tf_keras-2.18.0-py3-none-any.whl.metadata (1.6 kB)\n Downloading tf_keras-2.16.0-py3-none-any.whl.metadata (1.6 kB)\n Downloading tf_keras-2.15.1-py3-none-any.whl.metadata (1.7 kB)\nRequirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=1.0.1->tensorboard<2.16,>=2.15->tensorflow<2.16,>=2.10->mediapipe-model-maker) (3.0.2)\nRequirement already satisfied: webencodings in /usr/local/lib/python3.10/dist-packages (from bleach->kaggle>=1.3.9->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (0.5.1)\nRequirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (1.3.1)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (0.12.1)\nRequirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (4.55.3)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (1.4.7)\nRequirement already satisfied: intel-openmp>=2024 in /usr/local/lib/python3.10/dist-packages (from mkl->numpy->mediapipe-model-maker) (2024.2.0)\nRequirement already satisfied: tbb==2022.* in /usr/local/lib/python3.10/dist-packages (from mkl->numpy->mediapipe-model-maker) (2022.0.0)\nRequirement already satisfied: tcmlib==1.* in /usr/local/lib/python3.10/dist-packages (from tbb==2022.*->mkl->numpy->mediapipe-model-maker) (1.2.0)\nRequirement already satisfied: intel-cmplr-lib-ur==2024.2.0 in /usr/local/lib/python3.10/dist-packages (from intel-openmp>=2024->mkl->numpy->mediapipe-model-maker) (2024.2.0)\nRequirement already satisfied: intel-cmplr-lib-rt in /usr/local/lib/python3.10/dist-packages (from mkl_umath->numpy->mediapipe-model-maker) (2024.2.0)\nRequirement already satisfied: text-unidecode>=1.3 in /usr/local/lib/python3.10/dist-packages (from python-slugify->kaggle>=1.3.9->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (1.3)\nCollecting portalocker (from sacrebleu->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker)\n Downloading portalocker-3.2.0-py3-none-any.whl.metadata (8.7 kB)\nRequirement already satisfied: regex in /usr/local/lib/python3.10/dist-packages (from sacrebleu->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (2024.11.6)\nRequirement already satisfied: tabulate>=0.8.9 in /usr/local/lib/python3.10/dist-packages (from sacrebleu->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (0.9.0)\nRequirement already satisfied: colorama in /usr/local/lib/python3.10/dist-packages (from sacrebleu->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (0.4.6)\nRequirement already satisfied: lxml in /usr/local/lib/python3.10/dist-packages (from sacrebleu->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (5.3.0)\nRequirement already satisfied: scikit-learn>=0.21.3 in /usr/local/lib/python3.10/dist-packages (from seqeval->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (1.2.2)\nRequirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (1.4.2)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official<2.16.0,>=2.13.2->mediapipe-model-maker) (3.5.0)\nCollecting typeguard<3.0.0,>=2.7 (from tensorflow-addons->mediapipe-model-maker)\n Downloading typeguard-2.13.3-py3-none-any.whl.metadata (3.6 kB)\nRequirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->mediapipe-model-maker) (8.1.7)\nRequirement already satisfied: promise in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->mediapipe-model-maker) (2.3)\nRequirement already satisfied: pyarrow in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->mediapipe-model-maker) (17.0.0)\nRequirement already satisfied: simple-parsing in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->mediapipe-model-maker) (0.1.6)\nRequirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->mediapipe-model-maker) (1.13.1)\nRequirement already satisfied: toml in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->mediapipe-model-maker) (0.10.2)\nRequirement already satisfied: array-record>=0.5.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->mediapipe-model-maker) (0.5.1)\nRequirement already satisfied: etils>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from etils[edc,enp,epath,epy,etree]>=1.6.0; python_version < \"3.11\"->tensorflow-datasets->mediapipe-model-maker) (1.11.0)\nRequirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from etils[edc,enp,epath,epy,etree]>=1.6.0; python_version < \"3.11\"->tensorflow-datasets->mediapipe-model-maker) (2024.9.0)\nRequirement already satisfied: importlib_resources in /usr/local/lib/python3.10/dist-packages (from etils[edc,enp,epath,epy,etree]>=1.6.0; python_version < \"3.11\"->tensorflow-datasets->mediapipe-model-maker) (6.4.5)\nRequirement already satisfied: zipp in /usr/local/lib/python3.10/dist-packages (from etils[edc,enp,epath,epy,etree]>=1.6.0; python_version < \"3.11\"->tensorflow-datasets->mediapipe-model-maker) (3.21.0)\nRequirement already satisfied: docstring-parser<1.0,>=0.15 in /usr/local/lib/python3.10/dist-packages (from simple-parsing->tensorflow-datasets->mediapipe-model-maker) (0.16)\nRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch->mediapipe>=0.10.0->mediapipe-model-maker) (3.16.1)\nRequirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch->mediapipe>=0.10.0->mediapipe-model-maker) (3.4.2)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch->mediapipe>=0.10.0->mediapipe-model-maker) (3.1.4)\nRequirement already satisfied: sympy==1.13.1 in /usr/local/lib/python3.10/dist-packages (from torch->mediapipe>=0.10.0->mediapipe-model-maker) (1.13.1)\nRequirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from sympy==1.13.1->torch->mediapipe>=0.10.0->mediapipe-model-maker) (1.3.0)\nDownloading mediapipe_model_maker-0.2.1.4-py3-none-any.whl (133 kB)\nDownloading tensorflow-2.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (475.2 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m475.2/475.2 MB\u001b[0m \u001b[31m8.6 MB/s\u001b[0m \u001b[33m0:00:53\u001b[0m6m0:00:01\u001b[0m00:02\u001b[0m\n\u001b[?25hDownloading keras-2.15.0-py3-none-any.whl (1.7 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m58.8 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n\u001b[?25hDownloading ml_dtypes-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.2 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.2/2.2 MB\u001b[0m \u001b[31m83.2 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n\u001b[?25hDownloading tensorboard-2.15.2-py3-none-any.whl (5.5 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.5/5.5 MB\u001b[0m \u001b[31m106.3 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n\u001b[?25hDownloading tensorflow_estimator-2.15.0-py2.py3-none-any.whl (441 kB)\nDownloading tensorflow_model_optimization-0.7.5-py2.py3-none-any.whl (241 kB)\nDownloading tf_models_official-2.15.0-py2.py3-none-any.whl (2.7 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.7/2.7 MB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m eta \u001b[36m0:00:01\u001b[0m0m\n\u001b[?25hDownloading tensorflow_text-2.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.2 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.2/5.2 MB\u001b[0m \u001b[31m91.7 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n\u001b[?25hDownloading wrapt-1.14.2-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (76 kB)\nDownloading mediapipe-0.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (35.6 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m35.6/35.6 MB\u001b[0m \u001b[31m110.7 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0mm0:00:01\u001b[0m\n\u001b[?25hDownloading sounddevice-0.5.2-py3-none-any.whl (32 kB)\nDownloading tf_keras-2.15.1-py3-none-any.whl (1.7 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m68.8 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n\u001b[?25hDownloading sacrebleu-2.5.1-py3-none-any.whl (104 kB)\nDownloading portalocker-3.2.0-py3-none-any.whl (22 kB)\nDownloading tensorflow_addons-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (611 kB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m611.8/611.8 kB\u001b[0m \u001b[31m19.8 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n\u001b[?25hDownloading typeguard-2.13.3-py3-none-any.whl (17 kB)\nBuilding wheels for collected packages: seqeval\n\u001b[33m DEPRECATION: Building 'seqeval' using the legacy setup.py bdist_wheel mechanism, which will be removed in a future version. pip 25.3 will enforce this behaviour change. A possible replacement is to use the standardized build interface by setting the `--use-pep517` option, (possibly combined with `--no-build-isolation`), or adding a `pyproject.toml` file to the source tree of 'seqeval'. Discussion can be found at https://github.com/pypa/pip/issues/6334\u001b[0m\u001b[33m\n\u001b[0m Building wheel for seqeval (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for seqeval: filename=seqeval-1.2.2-py3-none-any.whl size=16161 sha256=d0c942eb1844e343dd532e42ee700591c535166614de75b7e5a243d935cf41b6\n Stored in directory: /root/.cache/pip/wheels/1a/67/4a/ad4082dd7dfc30f2abfe4d80a2ed5926a506eb8a972b4767fa\nSuccessfully built seqeval\nInstalling collected packages: wrapt, typeguard, tensorflow-estimator, portalocker, keras, tensorflow-addons, sounddevice, tensorboard, ml-dtypes, tensorflow, tf-keras, tensorflow-text, tensorflow-model-optimization, seqeval, sacrebleu, tf-models-official, mediapipe, mediapipe-model-maker\n\u001b[2K Attempting uninstall: wrapt\n\u001b[2K Found existing installation: wrapt 1.17.0\n\u001b[2K Uninstalling wrapt-1.17.0:\n\u001b[2K Successfully uninstalled wrapt-1.17.0\n\u001b[2K Attempting uninstall: typeguard\n\u001b[2K Found existing installation: typeguard 4.4.1\n\u001b[2K Uninstalling typeguard-4.4.1:\n\u001b[2K Successfully uninstalled typeguard-4.4.1\n\u001b[2K Attempting uninstall: keras0m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 2/18\u001b[0m [tensorflow-estimator]\n\u001b[2K Found existing installation: keras 3.5.0━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 2/18\u001b[0m [tensorflow-estimator]\n\u001b[2K Uninstalling keras-3.5.0:m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 4/18\u001b[0m [keras]-estimator]\n\u001b[2K Successfully uninstalled keras-3.5.0━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 4/18\u001b[0m [keras]\n\u001b[2K Attempting uninstall: tensorboard90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 5/18\u001b[0m [tensorflow-addons]\n\u001b[2K Found existing installation: tensorboard 2.17.1━━━━━━━━━━━\u001b[0m \u001b[32m 5/18\u001b[0m [tensorflow-addons]\n\u001b[2K Uninstalling tensorboard-2.17.1:━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 5/18\u001b[0m [tensorflow-addons]\n\u001b[2K Successfully uninstalled tensorboard-2.17.1━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 7/18\u001b[0m [tensorboard]s]\n\u001b[2K Attempting uninstall: ml-dtypes\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 7/18\u001b[0m [tensorboard]\n\u001b[2K Found existing installation: ml-dtypes 0.4.1━━━━━━━━━━━━━━\u001b[0m \u001b[32m 7/18\u001b[0m [tensorboard]\n\u001b[2K Uninstalling ml-dtypes-0.4.1:\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 7/18\u001b[0m [tensorboard]\n\u001b[2K Successfully uninstalled ml-dtypes-0.4.1━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 7/18\u001b[0m [tensorboard]\n\u001b[2K Attempting uninstall: tensorflow[90m━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 7/18\u001b[0m [tensorboard]\n\u001b[2K Found existing installation: tensorflow 2.17.1━━━━━━━━━━━━\u001b[0m \u001b[32m 7/18\u001b[0m [tensorboard]\n\u001b[2K Uninstalling tensorflow-2.17.1:0m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 9/18\u001b[0m [tensorflow]\n\u001b[2K Successfully uninstalled tensorflow-2.17.1━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 9/18\u001b[0m [tensorflow]\n\u001b[2K Attempting uninstall: tf-keras\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 9/18\u001b[0m [tensorflow]\n\u001b[2K Found existing installation: tf_keras 2.17.0━━━━━━━━━━━━━━\u001b[0m \u001b[32m 9/18\u001b[0m [tensorflow]\n\u001b[2K Uninstalling tf_keras-2.17.0:m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10/18\u001b[0m [tf-keras]\n\u001b[2K Successfully uninstalled tf_keras-2.17.090m━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10/18\u001b[0m [tf-keras]\n\u001b[2K Attempting uninstall: tensorflow-text╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10/18\u001b[0m [tf-keras]\n\u001b[2K Found existing installation: tensorflow-text 2.17.0━━━━━━━\u001b[0m \u001b[32m10/18\u001b[0m [tf-keras]\n\u001b[2K Uninstalling tensorflow-text-2.17.0:\u001b[90m━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10/18\u001b[0m [tf-keras]\n\u001b[2K Successfully uninstalled tensorflow-text-2.17.0━━━━━━━━━━━━━\u001b[0m \u001b[32m11/18\u001b[0m [tensorflow-text]\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m18/18\u001b[0m [mediapipe-model-maker]apipe]official]timization]\n\u001b[1A\u001b[2K\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\ninflect 7.4.0 requires typeguard>=4.0.1, but you have typeguard 2.13.3 which is incompatible.\ntensorflow-decision-forests 1.10.0 requires tensorflow==2.17.0, but you have tensorflow 2.15.1 which is incompatible.\ntensorflow-decision-forests 1.10.0 requires tf-keras~=2.17, but you have tf-keras 2.15.1 which is incompatible.\nydata-profiling 4.12.1 requires typeguard<5,>=3, but you have typeguard 2.13.3 which is incompatible.\u001b[0m\u001b[31m\n\u001b[0mSuccessfully installed keras-2.15.0 mediapipe-0.10.11 mediapipe-model-maker-0.2.1.4 ml-dtypes-0.3.2 portalocker-3.2.0 sacrebleu-2.5.1 seqeval-1.2.2 sounddevice-0.5.2 tensorboard-2.15.2 tensorflow-2.15.1 tensorflow-addons-0.23.0 tensorflow-estimator-2.15.0 tensorflow-model-optimization-0.7.5 tensorflow-text-2.15.0 tf-keras-2.15.1 tf-models-official-2.15.0 typeguard-2.13.3 wrapt-1.14.2\n","output_type":"stream"},{"name":"stderr","text":"/usr/local/lib/python3.10/dist-packages/tensorflow_addons/utils/tfa_eol_msg.py:23: UserWarning: \n\nTensorFlow Addons (TFA) has ended development and introduction of new features.\nTFA has entered a minimal maintenance and release mode until a planned end of life in May 2024.\nPlease modify downstream libraries to take dependencies from other repositories in our TensorFlow community (e.g. Keras, Keras-CV, and Keras-NLP). \n\nFor more information see: https://github.com/tensorflow/addons/issues/2807 \n\n warnings.warn(\n","output_type":"stream"}],"execution_count":2},{"cell_type":"markdown","source":"# ENVIRONMENT","metadata":{}},{"cell_type":"code","source":"!pip list","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:38:41.759783Z","iopub.execute_input":"2025-10-07T02:38:41.760321Z","iopub.status.idle":"2025-10-07T02:38:43.343963Z","shell.execute_reply.started":"2025-10-07T02:38:41.760289Z","shell.execute_reply":"2025-10-07T02:38:43.342824Z"}},"outputs":[{"name":"stdout","text":"Package Version Editable project location\n---------------------------------- -------------------- -------------------------\nabsl-py 1.4.0\naccelerate 1.2.1\naiobotocore 2.17.0\naiofiles 22.1.0\naiohappyeyeballs 2.4.4\naiohttp 3.11.10\naioitertools 0.12.0\naiosignal 1.3.2\naiosqlite 0.20.0\nalabaster 1.0.0\nalbucore 0.0.19\nalbumentations 1.4.20\nalembic 1.14.0\naltair 5.5.0\nannotated-types 0.7.0\nannoy 1.17.3\nansicolors 1.1.8\nantlr4-python3-runtime 4.9.3\nanyio 3.7.1\nargon2-cffi 23.1.0\nargon2-cffi-bindings 21.2.0\nargs 0.1.0\narray_record 0.5.1\narrow 1.3.0\narviz 0.20.0\nastropy 6.1.7\nastropy-iers-data 0.2024.12.16.0.35.48\nasttokens 3.0.0\nastunparse 1.6.3\nasync-timeout 4.0.3\natpublic 4.1.0\nattrs 24.3.0\naudioread 3.0.1\nautograd 1.7.0\nbabel 2.16.0\nbackcall 0.2.0\nbayesian-optimization 2.0.3\nbeautifulsoup4 4.12.3\nbigframes 1.29.0\nbigquery-magics 0.4.0\nbleach 6.2.0\nblinker 1.9.0\nblis 0.7.11\nblobfile 3.0.0\nblosc2 2.7.1\nbokeh 3.6.2\nBoruta 0.4.3\nboto3 1.35.93\nbotocore 1.35.93\nBottleneck 1.4.2\nbq_helper 0.4.1 /root/src/BigQuery_Helper\nbqplot 0.12.43\nbranca 0.8.1\nCacheControl 0.14.1\ncachetools 5.5.0\nCartopy 0.24.1\ncatalogue 2.0.10\ncatboost 1.2.7\ncategory_encoders 2.7.0\ncertifi 2024.12.14\ncesium 0.12.1\ncffi 1.17.1\nchardet 5.2.0\ncharset-normalizer 3.4.0\nChessnut 0.4.1\nchex 0.1.88\nclarabel 0.9.0\nclick 8.1.7\nclick-plugins 1.1.1\ncligj 0.7.2\nclint 0.5.1\ncloudpathlib 0.20.0\ncloudpickle 3.1.0\ncmake 3.31.2\ncmdstanpy 1.2.5\ncolorama 0.4.6\ncolorcet 3.1.0\ncolorlog 6.9.0\ncolorlover 0.3.0\ncolour 0.1.5\ncomm 0.2.2\ncommunity 1.0.0b1\nconfection 0.1.5\ncons 0.4.6\ncontourpy 1.3.1\ncoverage 7.6.10\ncryptography 43.0.3\ncuda-python 12.2.1\ncudf-cu12 24.12.0\ncufflinks 0.17.3\ncuml-cu12 24.12.0\ncupy-cuda12x 12.2.0\ncuvs-cu12 24.12.0\ncvxopt 1.3.2\ncvxpy 1.6.0\ncycler 0.12.1\ncymem 2.0.10\nCython 3.0.11\ncytoolz 1.0.1\ndaal 2025.0.1\ndacite 1.8.1\ndask 2024.11.2\ndask-cuda 24.12.0\ndask-cudf-cu12 24.12.0\ndask-expr 1.1.19\ndataclasses-json 0.6.7\ndatascience 0.17.6\ndatasets 3.2.0\ndatashader 0.16.3\ndb-dtypes 1.3.1\ndbus-python 1.2.18\ndeap 1.4.1\ndebugpy 1.8.0\ndecorator 4.4.2\ndeepdiff 8.1.1\ndefusedxml 0.7.1\nDeprecated 1.2.15\ndiffusers 0.31.0\ndill 0.3.8\ndipy 1.10.0\ndistributed 2024.11.2\ndistributed-ucxx-cu12 0.41.0\ndistro 1.9.0\ndlib 19.24.2\ndm-tree 0.1.8\ndnspython 2.7.0\ndocker 7.1.0\ndocker-pycreds 0.4.0\ndocstring_parser 0.16\ndocstring-to-markdown 0.15\ndocutils 0.21.2\ndopamine_rl 4.1.0\nduckdb 1.1.3\nearthengine-api 1.4.3\neasydict 1.13\neasyocr 1.7.2\neditdistance 0.8.1\neerepr 0.0.4\neinops 0.8.0\neli5 0.13.0\nemoji 2.14.0\nen-core-web-sm 3.7.1\nentrypoints 0.4\net_xmlfile 2.0.0\netils 1.11.0\netuples 0.3.9\neval_type_backport 0.2.0\nexceptiongroup 1.2.2\nexecnb 0.1.11\nFarama-Notifications 0.0.4\nfastai 2.7.18\nfastcore 1.7.27\nfastdownload 0.0.7\nfastjsonschema 2.21.1\nfastprogress 1.0.3\nfastrlock 0.8.2\nfasttext 0.9.3\nfeaturetools 1.31.0\nfilelock 3.16.1\nfiona 1.10.1\nfirebase-admin 6.6.0\nFlask 3.1.0\nflatbuffers 24.3.25\nflax 0.8.5\nfolium 0.19.2\nfonttools 4.55.3\nfqdn 1.5.1\nfrozendict 2.4.6\nfrozenlist 1.5.0\nfsspec 2024.9.0\nfuncy 2.0\nfury 0.12.0\nfuture 1.0.0\nfuzzywuzzy 0.18.0\ngast 0.6.0\ngatspy 0.3\ngcsfs 2024.10.0\nGDAL 3.6.4\ngdown 5.2.0\ngeemap 0.35.1\ngensim 4.3.3\ngeocoder 1.38.1\ngeographiclib 2.0\ngeojson 3.2.0\ngeopandas 0.14.4\ngeopy 2.4.1\nghapi 1.0.6\ngin-config 0.5.0\ngitdb 4.0.11\nGitPython 3.1.43\nglob2 0.7\ngoogle 2.0.3\ngoogle-ai-generativelanguage 0.6.10\ngoogle-api-core 1.34.1\ngoogle-api-python-client 2.155.0\ngoogle-auth 2.27.0\ngoogle-auth-httplib2 0.2.0\ngoogle-auth-oauthlib 1.2.1\ngoogle-cloud-aiplatform 1.74.0\ngoogle-cloud-automl 1.0.1\ngoogle-cloud-bigquery 3.25.0\ngoogle-cloud-bigquery-connection 1.17.0\ngoogle-cloud-bigtable 2.27.0\ngoogle-cloud-core 2.4.1\ngoogle-cloud-datastore 2.20.2\ngoogle-cloud-firestore 2.19.0\ngoogle-cloud-functions 1.19.0\ngoogle-cloud-iam 2.17.0\ngoogle-cloud-language 2.16.0\ngoogle-cloud-pubsub 2.27.1\ngoogle-cloud-resource-manager 1.14.0\ngoogle-cloud-storage 2.14.0\ngoogle-cloud-translate 3.12.1\ngoogle-cloud-videointelligence 2.15.0\ngoogle-cloud-vision 3.9.0\ngoogle-colab 1.0.0\ngoogle-crc32c 1.6.0\ngoogle-genai 0.2.2\ngoogle-generativeai 0.8.3\ngoogle-pasta 0.2.0\ngoogle-resumable-media 2.7.2\ngoogleapis-common-protos 1.66.0\ngoogledrivedownloader 0.4\ngpxpy 1.6.2\ngraphviz 0.20.3\ngreenlet 3.1.1\ngrpc-google-iam-v1 0.13.1\ngrpcio 1.68.1\ngrpcio-status 1.48.2\ngspread 6.0.2\ngspread-dataframe 3.3.1\ngym 0.25.2\ngym-notices 0.0.8\ngymnasium 0.29.0\nh11 0.14.0\nh2o 3.46.0.6\nh5netcdf 1.4.1\nh5py 3.12.1\nhaversine 2.9.0\nhep_ml 0.7.3\nhf_transfer 0.1.9\nholidays 0.63\nholoviews 1.20.0\nhtml5lib 1.1\nhtmlmin 0.1.12\nhttpcore 1.0.7\nhttpimport 1.4.0\nhttplib2 0.22.0\nhttpx 0.28.1\nhuggingface-hub 0.27.0\nhumanize 4.11.0\nhyperopt 0.2.7\nibis-framework 9.2.0\nidna 3.10\nigraph 0.11.8\nImageHash 4.3.1\nimageio 2.36.1\nimageio-ffmpeg 0.5.1\nimagesize 1.4.1\nimbalanced-learn 0.12.4\nimgaug 0.4.0\nimmutabledict 4.2.1\nimportlib_metadata 8.5.0\nimportlib_resources 6.4.5\nimutils 0.5.4\ninflect 7.4.0\niniconfig 2.0.0\nintel-cmplr-lib-rt 2024.2.0\nintel-cmplr-lib-ur 2024.2.0\nintel-openmp 2024.2.0\nipyevents 2.0.2\nipyfilechooser 0.6.0\nipykernel 5.5.6\nipyleaflet 0.19.2\nipympl 0.9.6\nipyparallel 8.8.0\nipython 7.34.0\nipython-genutils 0.2.0\nipython-sql 0.5.0\nipytree 0.2.2\nipywidgets 8.1.5\nisoduration 20.11.0\nisoweek 1.3.3\nitsdangerous 2.2.0\nJanome 0.5.0\njax 0.4.33\njax-cuda12-pjrt 0.4.33\njax-cuda12-plugin 0.4.33\njaxlib 0.4.33\njedi 0.19.2\njeepney 0.7.1\njellyfish 1.1.0\njieba 0.42.1\nJinja2 3.1.4\njiter 0.8.2\njmespath 1.0.1\njoblib 1.4.2\njson5 0.10.0\njsonpatch 1.33\njsonpickle 4.0.1\njsonpointer 3.0.0\njsonschema 4.23.0\njsonschema-specifications 2024.10.1\njupyter_client 8.6.3\njupyter-console 6.1.0\njupyter_core 5.7.2\njupyter-events 0.11.0\njupyter-leaflet 0.19.2\njupyter-lsp 1.5.1\njupyter_server 2.12.5\njupyter_server_fileid 0.9.3\njupyter_server_terminals 0.5.3\njupyter_server_ydoc 0.8.0\njupyter-ydoc 0.2.5\njupyterlab 3.6.8\njupyterlab-lsp 3.10.2\njupyterlab_pygments 0.3.0\njupyterlab_server 2.27.3\njupyterlab_widgets 3.0.13\nkaggle 1.6.17\nkaggle-environments 1.16.10\nkagglehub 0.3.6\nkeras 2.15.0\nkeras-core 0.1.7\nkeras-cv 0.9.0\nkeras-hub 0.18.1\nkeras-nlp 0.18.1\nkeras-tuner 1.4.7\nkeyring 23.5.0\nkiwisolver 1.4.7\nkornia 0.7.4\nkornia_rs 0.1.8\nkt-legacy 1.0.5\nlangchain 0.3.12\nlangchain-core 0.3.25\nlangchain-text-splitters 0.3.3\nlangcodes 3.5.0\nlangid 1.1.6\nlangsmith 0.2.3\nlanguage_data 1.3.0\nlaunchpadlib 1.10.16\nlazr.restfulclient 0.14.4\nlazr.uri 1.0.6\nlazy_loader 0.4\nlearntools 0.3.4\nleven 1.0.4\nlibclang 18.1.1\nlibcudf-cu12 24.12.0\nlibkvikio-cu12 24.12.1\nlibpysal 4.9.2\nlibrosa 0.10.2.post1\nlibucx-cu12 1.17.0.post1\nlibucxx-cu12 0.41.0\nlightgbm 4.5.0\nlightning-utilities 0.11.9\nlime 0.2.0.1\nline_profiler 4.2.0\nlinkify-it-py 2.0.3\nllvmlite 0.43.0\nlml 0.1.0\nlocket 1.0.0\nlogical-unification 0.4.6\nlxml 5.3.0\nMako 1.3.8\nmamba 0.11.3\nmarisa-trie 1.2.1\nMarkdown 3.7\nmarkdown-it-py 3.0.0\nMarkupSafe 3.0.2\nmarshmallow 3.24.2\nmatplotlib 3.7.5\nmatplotlib-inline 0.1.7\nmatplotlib-venn 1.1.1\nmdit-py-plugins 0.4.2\nmdurl 0.1.2\nmediapipe 0.10.11\nmediapipe-model-maker 0.2.1.4\nminiKanren 1.0.3\nmissingno 0.5.2\nmistune 0.8.4\nmizani 0.13.1\nmkl 2025.0.1\nmkl-fft 1.3.8\nmkl-random 1.2.4\nmkl-service 2.4.1\nmkl-umath 0.1.1\nml-dtypes 0.3.2\nmlcrate 0.2.0\nmlxtend 0.23.3\nmne 1.9.0\nmore-itertools 10.5.0\nmoviepy 1.0.3\nmpld3 0.5.10\nmpmath 1.3.0\nmsgpack 1.1.0\nmultidict 6.1.0\nmultimethod 1.12\nmultipledispatch 1.0.0\nmultiprocess 0.70.16\nmultitasking 0.0.11\nmurmurhash 1.0.11\nmusic21 9.3.0\nmypy-extensions 1.0.0\nnamex 0.0.8\nnarwhals 1.18.4\nnatsort 8.4.0\nnbclassic 1.1.0\nnbclient 0.5.13\nnbconvert 6.4.5\nnbdev 2.3.34\nnbformat 5.10.4\nndindex 1.9.2\nnest-asyncio 1.6.0\nnetworkx 3.4.2\nnibabel 5.3.2\nnilearn 0.10.4\nninja 1.11.1.3\nnltk 3.2.4\nnose 1.3.7\nnotebook 6.5.4\nnotebook_shim 0.2.4\nnumba 0.60.0\nnumba-cuda 0.0.17.1\nnumexpr 2.10.2\nnumpy 1.26.4\nnvidia-cublas-cu12 12.6.4.1\nnvidia-cuda-cupti-cu12 12.6.80\nnvidia-cuda-nvcc-cu12 12.6.85\nnvidia-cuda-runtime-cu12 12.6.77\nnvidia-cudnn-cu12 9.6.0.74\nnvidia-cufft-cu12 11.3.0.4\nnvidia-curand-cu12 10.3.7.77\nnvidia-cusolver-cu12 11.7.1.2\nnvidia-cusparse-cu12 12.5.4.2\nnvidia-nccl-cu12 2.23.4\nnvidia-nvcomp-cu12 4.1.0.6\nnvidia-nvjitlink-cu12 12.6.85\nnvtx 0.2.10\nnx-cugraph-cu12 24.10.0\noauth2client 4.1.3\noauthlib 3.2.2\nodfpy 1.4.1\nolefile 0.47\nomegaconf 2.3.0\nonnx 1.17.0\nopenai 1.57.4\nopencv-contrib-python 4.10.0.84\nopencv-python 4.10.0.84\nopencv-python-headless 4.10.0.84\nopenpyxl 3.1.5\nopenslide-bin 4.0.0.6\nopenslide-python 1.4.1\nopentelemetry-api 1.29.0\nopentelemetry-sdk 1.29.0\nopentelemetry-semantic-conventions 0.50b0\nopt_einsum 3.4.0\noptax 0.2.4\noptree 0.13.1\noptuna 4.1.0\norbax-checkpoint 0.6.4\norderly-set 5.2.3\norjson 3.10.12\nosqp 0.6.7.post3\noverrides 7.7.0\npackaging 24.2\npandas 2.2.2\npandas-datareader 0.10.0\npandas-gbq 0.25.0\npandas-profiling 3.6.6\npandas-stubs 2.2.2.240909\npandasql 0.7.3\npandocfilters 1.5.1\npanel 1.5.4\npapermill 2.6.0\nparam 2.2.0\nparso 0.8.4\nparsy 2.1\npartd 1.4.2\npath 17.1.0\npath.py 12.5.0\npathlib 1.0.1\npathos 0.3.2\npatsy 1.0.1\npdf2image 1.17.0\npeewee 3.17.8\npeft 0.14.0\npettingzoo 1.24.0\npexpect 4.9.0\nphik 0.12.4\npickleshare 0.7.5\npillow 11.0.0\npip 25.2\nplatformdirs 4.3.6\nplotly 5.24.1\nplotly-express 0.4.1\nplotnine 0.14.4\npluggy 1.5.0\nply 3.11\npolars 1.9.0\npooch 1.8.2\nportalocker 3.2.0\nportpicker 1.5.2\npox 0.3.5\nppft 1.7.6.9\npreprocessing 0.1.13\npreshed 3.0.9\nprettytable 3.12.0\nproglog 0.1.10\nprogressbar2 4.5.0\nprometheus_client 0.21.1\npromise 2.3\nprompt_toolkit 3.0.48\npropcache 0.2.1\nprophet 1.1.6\nproto-plus 1.25.0\nprotobuf 3.20.3\npsutil 5.9.5\npsycopg2 2.9.10\nptyprocess 0.7.0\npudb 2024.1.3\npy-cpuinfo 9.0.0\npy4j 0.10.9.7\npyaml 25.1.0\nPyArabic 0.6.15\npyarrow 17.0.0\npyasn1 0.6.1\npyasn1_modules 0.4.1\npybind11 2.13.6\npyclipper 1.3.0.post6\npycocotools 2.0.8\npycparser 2.22\npycryptodome 3.21.0\npycryptodomex 3.21.0\npyct 0.5.0\npycuda 2024.1.2\npydantic 2.10.3\npydantic_core 2.27.1\npydata-google-auth 1.9.0\npydegensac 0.1.2\npydicom 3.0.1\npydot 3.0.3\npydotplus 2.0.2\nPyDrive 1.3.1\nPyDrive2 1.21.3\npydub 0.25.1\npyemd 1.0.0\npyerfa 2.0.1.5\npyexcel-io 0.6.7\npyexcel-ods 0.6.0\npygame 2.6.1\npygit2 1.16.0\npygltflib 1.16.3\nPygments 2.18.0\nPyGObject 3.42.1\nPyJWT 2.10.1\npyLDAvis 3.4.1\npylibcudf-cu12 24.12.0\npylibcugraph-cu12 24.10.0\npylibraft-cu12 24.12.0\npymc 5.19.1\npymc3 3.11.4\npymongo 4.10.1\nPympler 1.1\npymystem3 0.2.0\npynvjitlink-cu12 0.4.0\npynvml 11.4.1\npyogrio 0.10.0\nPyomo 6.8.2\nPyOpenGL 3.1.7\npyOpenSSL 24.2.1\npyparsing 3.2.0\npypdf 5.1.0\npyperclip 1.9.0\npyproj 3.7.0\npyshp 2.3.1\nPySocks 1.7.1\npyspark 3.5.3\npytensor 2.26.4\npytesseract 0.3.13\npytest 8.3.4\npython-apt 0.0.0\npython-bidi 0.6.3\npython-box 7.3.0\npython-dateutil 2.8.2\npython-json-logger 3.2.1\npython-louvain 0.16\npython-lsp-jsonrpc 1.1.2\npython-lsp-server 1.12.0\npython-slugify 8.0.4\npython-utils 3.9.1\npytools 2024.1.21\npytorch-ignite 0.5.1\npytorch-lightning 2.5.0.post0\npytz 2024.2\nPyUpSet 0.1.1.post7\npyviz_comms 3.0.3\nPyWavelets 1.8.0\nPyYAML 6.0.2\npyzmq 24.0.1\nqdldl 0.1.7.post4\nqgrid 1.3.1\nqtconsole 5.6.1\nQtPy 2.4.2\nraft-dask-cu12 24.12.0\nrapids-dask-dependency 24.12.0\nratelim 0.1.6\nray 2.40.0\nreferencing 0.35.1\nregex 2024.11.6\nrequests 2.32.3\nrequests-oauthlib 1.3.1\nrequests-toolbelt 1.0.0\nrequirements-parser 0.9.0\nrfc3339-validator 0.1.4\nrfc3986-validator 0.1.1\nrgf-python 3.12.0\nrich 13.9.4\nrmm-cu12 24.12.1\nrpds-py 0.22.3\nrpy2 3.4.2\nrsa 4.9\nRtree 1.3.0\ns3fs 2024.9.0\ns3transfer 0.10.4\nsacrebleu 2.5.1\nsafetensors 0.4.5\nscikit-image 0.25.0\nscikit-learn 1.2.2\nscikit-learn-intelex 2025.0.1\nscikit-multilearn 0.2.0\nscikit-optimize 0.10.2\nscikit-plot 0.3.7\nscikit-surprise 1.1.4\nscipy 1.13.1\nscooby 0.10.0\nscs 3.2.7\nseaborn 0.12.2\nSecretStorage 3.3.1\nsegment_anything 1.0\nsemver 3.0.2\nSend2Trash 1.8.3\nsentence-transformers 3.3.1\nsentencepiece 0.2.0\nsentry-sdk 2.19.2\nseqeval 1.2.2\nsetproctitle 1.3.4\nsetuptools 75.1.0\nsetuptools-scm 8.1.0\nshap 0.44.1\nshapely 2.0.6\nshellingham 1.5.4\nShimmy 1.3.0\nsimple-parsing 0.1.6\nSimpleITK 2.4.0\nsix 1.17.0\nsklearn-pandas 2.2.0\nslicer 0.0.7\nsmart-open 7.0.5\nsmmap 5.0.1\nsniffio 1.3.1\nsnowballstemmer 2.2.0\nsortedcontainers 2.4.0\nsounddevice 0.5.2\nsoundfile 0.12.1\nsoupsieve 2.6\nsoxr 0.5.0.post1\nspacy 3.7.5\nspacy-legacy 3.0.12\nspacy-loggers 1.0.5\nSphinx 8.1.3\nsphinx-rtd-theme 0.2.4\nsphinxcontrib-applehelp 2.0.0\nsphinxcontrib-devhelp 2.0.0\nsphinxcontrib-htmlhelp 2.1.0\nsphinxcontrib-jsmath 1.0.1\nsphinxcontrib-qthelp 2.0.0\nsphinxcontrib-serializinghtml 2.0.0\nSQLAlchemy 2.0.36\nsqlglot 25.1.0\nsqlparse 0.5.3\nsquarify 0.4.4\nsrsly 2.5.0\nstable-baselines3 2.1.0\nstanio 0.5.1\nstatsmodels 0.14.4\nstopit 1.1.2\nStrEnum 0.4.15\nstringzilla 3.11.1\nstumpy 1.13.0\nsympy 1.13.1\ntables 3.10.1\ntabulate 0.9.0\ntbb 2022.0.0\ntbb4py 2022.0.0\ntblib 3.0.0\ntcmlib 1.2.0\ntenacity 9.0.0\ntensorboard 2.15.2\ntensorboard-data-server 0.7.2\ntensorflow 2.15.1\ntensorflow-addons 0.23.0\ntensorflow-cloud 0.1.5\ntensorflow-datasets 4.9.7\ntensorflow_decision_forests 1.10.0\ntensorflow-estimator 2.15.0\ntensorflow-hub 0.16.1\ntensorflow-io 0.37.1\ntensorflow-io-gcs-filesystem 0.37.1\ntensorflow-metadata 1.13.1\ntensorflow-model-optimization 0.7.5\ntensorflow-probability 0.24.0\ntensorflow-text 2.15.0\ntensorstore 0.1.71\ntermcolor 2.5.0\nterminado 0.18.1\ntestpath 0.6.0\ntext-unidecode 1.3\ntextblob 0.17.1\ntexttable 1.7.0\ntf_keras 2.15.1\ntf-models-official 2.15.0\ntf-slim 1.1.0\nTheano 1.0.5\nTheano-PyMC 1.1.2\nthinc 8.2.5\nthreadpoolctl 3.5.0\ntifffile 2024.12.12\ntiktoken 0.8.0\ntimm 1.0.12\ntinycss2 1.4.0\ntokenizers 0.21.0\ntoml 0.10.2\ntomli 2.2.1\ntoolz 0.12.1\ntorch 2.5.1+cu121\ntorchaudio 2.5.1+cu121\ntorchinfo 1.8.0\ntorchmetrics 1.6.1\ntorchsummary 1.5.1\ntorchtune 0.5.0\ntorchvision 0.20.1+cu121\ntornado 6.3.3\nTPOT 0.12.1\ntqdm 4.67.1\ntraitlets 5.7.1\ntraittypes 0.2.1\ntransformers 4.47.0\ntreelite 4.3.0\ntrx-python 0.3\ntsfresh 0.20.2\ntweepy 4.14.0\ntypeguard 2.13.3\ntyper 0.15.1\ntypes-python-dateutil 2.9.0.20241206\ntypes-pytz 2024.2.0.20241003\ntypes-setuptools 75.6.0.20241126\ntyping_extensions 4.12.2\ntyping-inspect 0.9.0\ntzdata 2024.2\ntzlocal 5.2\nuc-micro-py 1.0.3\nucx-py-cu12 0.41.0\nucxx-cu12 0.41.0\nujson 5.10.0\numf 0.9.1\nupdate-checker 0.18.0\nuri-template 1.3.0\nuritemplate 4.1.1\nurllib3 2.2.3\nurwid 2.6.16\nurwid_readline 0.15.1\nvega-datasets 0.9.0\nvisions 0.7.6\nvtk 9.3.1\nwadllib 1.3.6\nWand 0.6.13\nwandb 0.19.1\nwasabi 1.1.3\nwatchdog 6.0.0\nwavio 0.0.9\nwcwidth 0.2.13\nweasel 0.4.1\nwebcolors 24.11.1\nwebencodings 0.5.1\nwebsocket-client 1.8.0\nwebsockets 14.1\nWerkzeug 3.1.3\nwheel 0.45.1\nwidgetsnbextension 4.0.13\nwoodwork 0.31.0\nwordcloud 1.9.4\nwrapt 1.14.2\nwurlitzer 3.1.1\nxarray 2024.11.0\nxarray-einstats 0.8.0\nxgboost 2.0.3\nxlrd 2.0.1\nxvfbwrapper 0.2.9\nxxhash 3.5.0\nxyzservices 2024.9.0\ny-py 0.6.2\nyarl 1.18.3\nydata-profiling 4.12.1\nydf 0.9.0\nyellowbrick 1.5\nyfinance 0.2.50\nypy-websocket 0.8.4\nzict 3.0.0\nzipp 3.21.0\n","output_type":"stream"}],"execution_count":3},{"cell_type":"code","source":"csv_params = text_classifier.CSVParams(\n text_column='processed_text', label_column='label', delimiter=',')\ntrain_data = text_classifier.Dataset.from_csv(\n filename=TRAIN_DIR,\n csv_params=csv_params)\nvalidation_data = text_classifier.Dataset.from_csv(\n filename=VAL_DIR,\n csv_params=csv_params)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.440875Z","iopub.status.idle":"2025-10-07T02:32:17.441226Z","shell.execute_reply":"2025-10-07T02:32:17.441067Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"supported_model = text_classifier.SupportedModels.MOBILEBERT_CLASSIFIER\nhparams = text_classifier.BertHParams(epochs=3, batch_size=48, learning_rate=3e-5, export_dir=\"bert_exported_models\")\noptions = text_classifier.TextClassifierOptions(supported_model=supported_model, hparams=hparams)","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.441955Z","iopub.status.idle":"2025-10-07T02:32:17.442240Z","shell.execute_reply":"2025-10-07T02:32:17.442091Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"bert_model = text_classifier.TextClassifier.create(train_data, validation_data, options)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.442803Z","iopub.status.idle":"2025-10-07T02:32:17.443192Z","shell.execute_reply":"2025-10-07T02:32:17.442948Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"bert_model.export_model(model_name='PATH/TO/THE/MODEL/****.tflite')\nbert_model.export_labels(export_dir=options.hparams.export_dir)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.444005Z","iopub.status.idle":"2025-10-07T02:32:17.444295Z","shell.execute_reply":"2025-10-07T02:32:17.444187Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# 3. Quantized","metadata":{}},{"cell_type":"markdown","source":"### To dynamic (for Android devices)","metadata":{}},{"cell_type":"code","source":"quantization_config_for_dynamic = quantization.QuantizationConfig.for_dynamic()\nbert_model.export_model(model_name='PATH/TO/THE/MODEL/****_dynamic.tflite', quantization_config=quantization_config_for_dynamic)\nbert_model.export_labels(export_dir=options.hparams.export_dir)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.445136Z","iopub.status.idle":"2025-10-07T02:32:17.445489Z","shell.execute_reply":"2025-10-07T02:32:17.445358Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"### To float 16 (for IOS devices)","metadata":{}},{"cell_type":"code","source":"quantization_config_for_float16 = quantization.QuantizationConfig.for_float16()\nbert_model.export_model(model_name='PATH/TO/THE/MODEL/****_float16.tflite', quantization_config=quantization_config_for_float16)\nbert_model.export_labels(export_dir=options.hparams.export_dir)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.446076Z","iopub.status.idle":"2025-10-07T02:32:17.446387Z","shell.execute_reply":"2025-10-07T02:32:17.446273Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# 4. Validation with testing dataset","metadata":{}},{"cell_type":"code","source":"metrics = bert_model.evaluate(validation_data)\nprint(f'Test loss:{metrics[0]}, Test accuracy:{metrics[1]}')","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.447374Z","iopub.status.idle":"2025-10-07T02:32:17.447653Z","shell.execute_reply":"2025-10-07T02:32:17.447539Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"### Zip the model package","metadata":{}},{"cell_type":"code","source":"import shutil\nfolder_path = 'PATH/TO/THE/OUTPUR_DIRECTORY'\nzip_path = 'PATH/TO/THE/OUTPUR_DIRECTORY/*****.zip'\nshutil.make_archive(zip_path.replace('.zip', ''), 'zip', folder_path)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.448518Z","iopub.status.idle":"2025-10-07T02:32:17.448842Z","shell.execute_reply":"2025-10-07T02:32:17.448692Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# 5. Text classification Inference code","metadata":{}},{"cell_type":"code","source":"pip show mediapipe-model-maker","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.449437Z","iopub.status.idle":"2025-10-07T02:32:17.449717Z","shell.execute_reply":"2025-10-07T02:32:17.449612Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import re\nfrom mediapipe.tasks import python\nfrom mediapipe.tasks.python import text\n\nclass TextClassifier:\n def __init__(self, model_path):\n base_options = python.BaseOptions(model_asset_path=model_path)\n options = text.TextClassifierOptions(base_options=base_options)\n self.model = text.TextClassifier.create_from_options(options)\n \n def model_predict(self, input_text: str) -> str:\n classification_result = self.model.classify(input_text)\n top_category = classification_result.classifications[0].categories[0]\n return top_category.score, top_category.category_name\n \n def preprocess_text(self, input_text):\n processed_text = input_text.lower()\n processed_text = re.sub(r\"[^a-zA-Z0-9\\s\\-]\", \"\", processed_text)\n processed_text = re.sub(r\"\\s+\", \" \", processed_text).strip()\n return processed_text\n \n def run(self, input_text):\n text = self.preprocess_text(input_text)\n return self.model_predict(text)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.450408Z","iopub.status.idle":"2025-10-07T02:32:17.452271Z","shell.execute_reply":"2025-10-07T02:32:17.452125Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"## 5.1. TESTING AFTER FINE-TUNING","metadata":{}},{"cell_type":"code","source":"full_model = TextClassifier(\"/PATH/TO/THE/MODEL/***.tflite\")\ndynamid_model = TextClassifier(\"/PATH/TO/THE/MODEL/***_dynamic.tflite\")\nfloat_16_model = TextClassifier(\"/PATH/TO/THE/MODEL/***_float16.tflite\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.452787Z","iopub.status.idle":"2025-10-07T02:32:17.453071Z","shell.execute_reply":"2025-10-07T02:32:17.452948Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def predict(input_text):\n r1 = full_model.run(input_text)\n r2 = dynamid_model.run(input_text)\n r3 = float_16_model.run(input_text)\n print(f'Input: {input_text}')\n print(f'[Full model] Score: {r1[0]}; Label: {r1[1]}')\n print(f'[Dynamic model] Score: {r2[0]}; Label: {r2[1]}')\n print(f'[F16 model] Score: {r3[0]}; Label: {r3[1]}')","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.453517Z","iopub.status.idle":"2025-10-07T02:32:17.453800Z","shell.execute_reply":"2025-10-07T02:32:17.453679Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def predict(input_text):\n r1 = full_model.run(input_text)\n r2 = dynamid_model.run(input_text)\n r3 = float_16_model.run(input_text)\n print(f'Input: {input_text}')\n print(f'[Full model] Score: {r1[0]}; Label: {r1[1]}')\n print(f'[Dynamic model] Score: {r2[0]}; Label: {r2[1]}')\n print(f'[F16 model] Score: {r3[0]}; Label: {r3[1]}')","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.454425Z","iopub.status.idle":"2025-10-07T02:32:17.455838Z","shell.execute_reply":"2025-10-07T02:32:17.455696Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"predict(\"i love you\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.456562Z","iopub.status.idle":"2025-10-07T02:32:17.456859Z","shell.execute_reply":"2025-10-07T02:32:17.456732Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"## 5.2. LOADING MODEL FOR COMBINATION TESTING","metadata":{}},{"cell_type":"code","source":"text_classifier = TextClassifier(\"PATH/TO/THE/MODEL\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.457915Z","iopub.status.idle":"2025-10-07T02:32:17.458562Z","shell.execute_reply":"2025-10-07T02:32:17.458378Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# EMOATIONAL CLASSIFICATION","metadata":{}},{"cell_type":"markdown","source":"# 6. Emmotional classification Inference code","metadata":{}},{"cell_type":"markdown","source":"### 6.1. Install libraries needed","metadata":{}},{"cell_type":"code","source":"import csv\nimport librosa\nimport numpy as np\nimport tensorflow as tf\nfrom numpy.lib.stride_tricks import as_strided\nfrom typing import Tuple, Optional, Union\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef softmax(x):\n exp_x = np.exp(x - np.max(x)) # Subtract max(x) for numerical stability\n return exp_x / np.sum(exp_x, axis=-1, keepdims=True)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.459420Z","iopub.status.idle":"2025-10-07T02:32:17.459962Z","shell.execute_reply":"2025-10-07T02:32:17.459803Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"### 6.2. Audio file preprocessing code","metadata":{}},{"cell_type":"code","source":"def mel_scale_scalar(freq: float) -> float:\n \"\"\"Convert frequency to mel scale\"\"\"\n return 1127.0 * np.log(1.0 + freq / 700.0)\n\ndef mel_scale(freq: np.ndarray) -> np.ndarray:\n \"\"\"Vector version of mel scale conversion\"\"\"\n return 1127.0 * np.log(1.0 + freq / 700.0)\n\ndef inverse_mel_scale(mel: np.ndarray) -> np.ndarray:\n \"\"\"Convert mel scale to frequency\"\"\"\n return 700.0 * (np.exp(mel / 1127.0) - 1.0)\n\ndef vtln_warp_mel_freq(vtln_low: float,\n vtln_high: float,\n low_freq: float,\n high_freq: float,\n vtln_warp_factor: float,\n mel_freq: np.ndarray) -> np.ndarray:\n \"\"\"\n Implements VTLN warping for mel frequencies\n \"\"\"\n return mel_freq # Placeholder - implement if VTLN warping is needed\n\ndef get_mel_banks(\n num_bins: int,\n window_length_padded: int,\n sample_freq: float,\n low_freq: float,\n high_freq: float,\n vtln_low: float,\n vtln_high: float,\n vtln_warp_factor: float,\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Get mel filterbank matrices following Kaldi's implementation.\n\n Args:\n num_bins: Number of triangular bins\n window_length_padded: Padded window length\n sample_freq: Sampling frequency\n low_freq: Lowest frequency to consider\n high_freq: Highest frequency to consider\n vtln_low: Lower frequency for VTLN\n vtln_high: Higher frequency for VTLN\n vtln_warp_factor: Warping factor for VTLN\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: (bins, center_freqs)\n - bins: melbank matrix of shape (num_bins, num_fft_bins)\n - center_freqs: center frequencies of bins of shape (num_bins,)\n \"\"\"\n assert num_bins > 3, \"Must have at least 3 mel bins\"\n assert window_length_padded % 2 == 0\n num_fft_bins = window_length_padded // 2\n nyquist = 0.5 * sample_freq\n\n if high_freq <= 0.0:\n high_freq += nyquist\n\n assert (\n (0.0 <= low_freq < nyquist) and (0.0 < high_freq <= nyquist) and (low_freq < high_freq)\n ), f\"Bad values in options: low-freq {low_freq} and high-freq {high_freq} vs. nyquist {nyquist}\"\n\n # fft-bin width [think of it as Nyquist-freq / half-window-length]\n fft_bin_width = sample_freq / window_length_padded\n\n mel_low_freq = mel_scale_scalar(low_freq)\n mel_high_freq = mel_scale_scalar(high_freq)\n\n # divide by num_bins+1 in next line because of end-effects where the bins\n # spread out to the sides.\n mel_freq_delta = (mel_high_freq - mel_low_freq) / (num_bins + 1)\n\n if vtln_high < 0.0:\n vtln_high += nyquist\n\n assert vtln_warp_factor == 1.0 or (\n (low_freq < vtln_low < high_freq) and (0.0 < vtln_high < high_freq) and (vtln_low < vtln_high)\n ), f\"Bad values in options: vtln-low {vtln_low} and vtln-high {vtln_high}, versus low-freq {low_freq} and high-freq {high_freq}\"\n\n bin = np.arange(num_bins)[:, np.newaxis]\n left_mel = mel_low_freq + bin * mel_freq_delta # shape (num_bins, 1)\n center_mel = mel_low_freq + (bin + 1.0) * mel_freq_delta # shape (num_bins, 1)\n right_mel = mel_low_freq + (bin + 2.0) * mel_freq_delta # shape (num_bins, 1)\n\n if vtln_warp_factor != 1.0:\n left_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, left_mel)\n center_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, center_mel)\n right_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, right_mel)\n\n center_freqs = inverse_mel_scale(center_mel).squeeze(-1) # shape (num_bins)\n\n # shape (1, num_fft_bins)\n mel = mel_scale(fft_bin_width * np.arange(num_fft_bins))[np.newaxis, :]\n\n # shape (num_bins, num_fft_bins)\n up_slope = (mel - left_mel) / (center_mel - left_mel)\n down_slope = (right_mel - mel) / (right_mel - center_mel)\n\n if vtln_warp_factor == 1.0:\n # left_mel < center_mel < right_mel so we can min the two slopes and clamp negative values\n bins = np.maximum(0.0, np.minimum(up_slope, down_slope))\n else:\n # warping can move the order of left_mel, center_mel, right_mel anywhere\n bins = np.zeros_like(up_slope)\n up_idx = (mel > left_mel) & (mel <= center_mel) # left_mel < mel <= center_mel\n down_idx = (mel > center_mel) & (mel < right_mel) # center_mel < mel < right_mel\n bins[up_idx] = up_slope[up_idx]\n bins[down_idx] = down_slope[down_idx]\n\n return bins, center_freqs\n\ndef stft(\n input: np.ndarray,\n n_fft: int,\n hop_length: Optional[int] = None,\n win_length: Optional[int] = None,\n window: Optional[np.ndarray] = None,\n center: bool = True,\n pad_mode: str = \"reflect\",\n normalized: bool = False,\n onesided: bool = True,\n return_complex: bool = True\n) -> np.ndarray:\n \"\"\"\n NumPy implementation of Short-time Fourier transform (STFT).\n\n Args:\n input: Input signal (B?, L) where B? is optional batch dimension\n n_fft: Size of Fourier transform\n hop_length: Distance between neighboring frames (default: n_fft//4)\n win_length: Window size (default: n_fft)\n window: Window function (1D array)\n center: Whether to pad input for centered frames\n pad_mode: Padding mode ('reflect', 'constant', 'edge')\n normalized: Whether to normalize the STFT\n onesided: Whether to return only positive frequencies\n return_complex: Whether to return complex array\n \"\"\"\n # Set default values\n if hop_length is None:\n hop_length = n_fft // 4\n if win_length is None:\n win_length = n_fft\n\n # Prepare window\n if window is None:\n window = np.ones(win_length)\n\n \n if len(window) < n_fft:\n print('NO VO DAY ROI NE')\n \n \n # Pad window to n_fft if needed \n pad_width = (n_fft - len(window)) // 2\n window = np.pad(window, (pad_width, n_fft - len(window) - pad_width))\n\n # Handle input dimensions\n input = np.asarray(input)\n if input.ndim == 1:\n input = input[np.newaxis, :] # Add batch dimension\n squeeze_batch = True\n else:\n squeeze_batch = False\n\n # Pad signal for centered frames\n if center:\n pad_width = int(n_fft // 2)\n if pad_mode == 'reflect':\n input = np.pad(input, ((0, 0), (pad_width, pad_width)), mode='reflect')\n elif pad_mode == 'constant':\n input = np.pad(input, ((0, 0), (pad_width, pad_width)), mode='constant')\n elif pad_mode == 'edge':\n input = np.pad(input, ((0, 0), (pad_width, pad_width)), mode='edge')\n\n # Calculate number of frames\n n_frames = 1 + (input.shape[-1] - n_fft) // hop_length\n\n # Create frame matrix using stride tricks\n frame_length = n_fft\n frame_step = hop_length\n frame_stride = input.strides[-1]\n\n shape = (input.shape[0], n_frames, frame_length)\n strides = (input.strides[0], frame_step * frame_stride, frame_stride)\n\n frames = as_strided(input, shape=shape, strides=strides, writeable=False)\n\n # Apply window\n frames = frames * window\n\n # Compute FFT\n stft_matrix = np.fft.fft(frames, n=n_fft, axis=-1)\n\n # Normalize if requested\n if normalized:\n stft_matrix = stft_matrix / np.sqrt(n_fft)\n\n # Handle onesided output\n if onesided:\n stft_matrix = stft_matrix[..., :(n_fft // 2) + 1]\n\n # Handle return format\n if return_complex:\n result = stft_matrix\n else:\n result = np.stack((stft_matrix.real, stft_matrix.imag), axis=-1)\n\n # Remove batch dimension if input was 1D\n if squeeze_batch:\n result = result[0]\n\n return result\n\nclass MelSTFT:\n def __init__(self, n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024,\n fmin=0.0, fmax=None, fmin_aug_range=10, fmax_aug_range=2000):\n \"\"\"\n Initialize MelSTFT for audio feature extraction using only NumPy.\n \"\"\"\n self.n_mels = n_mels\n self.sr = sr\n self.win_length = win_length\n self.hopsize = hopsize\n self.n_fft = n_fft\n self.fmin = fmin\n if fmax is None:\n fmax = sr // 2 - fmax_aug_range // 2\n self.fmax = fmax\n\n # Create Hann window\n self.window = np.hanning(win_length) \n \n # Create mel filterbank following Kaldi's implementation\n self.mel_basis, _ = get_mel_banks(\n self.n_mels,\n self.n_fft,\n self.sr,\n self.fmin,\n self.fmax,\n 100.0,\n -500.,\n 1.0\n )\n\n self.mel_basis = np.pad(self.mel_basis, ((0, 0), (0, 1)), mode='constant', constant_values=0)\n \n # Pre-emphasis filter coefficients\n self.preemphasis_coefficient = np.array([-.97, 1]).reshape(1, 1, 2)\n\n def preemphasis(self, x):\n \"\"\"Apply pre-emphasis filter using conv1d equivalent\"\"\"\n # Reshape input to match conv1d input shape (batch, channels, length)\n x = x.reshape(1, 1, -1)\n\n # Implement conv1d manually\n output_size = x.shape[2] - self.preemphasis_coefficient.shape[2] + 1\n result = np.zeros((1, 1, output_size))\n\n for i in range(output_size):\n result[0, 0, i] = np.sum(x[0, 0, i:i+2] * self.preemphasis_coefficient[0, 0])\n\n return result[0]\n\n def __call__(self, x):\n \"\"\"Convert audio to log-Mel spectrogram.\"\"\"\n # Apply pre-emphasis\n x = self.preemphasis(x)\n with open('file.txt', 'w') as f:\n for i, v in enumerate(x[0]):\n f.write(f'{v},')\n if i%10 == 0:\n f.write('\\n')\n \n # Compute STFT\n \n spec_x = stft(\n input=x,\n n_fft=self.n_fft,\n hop_length=self.hopsize,\n win_length=self.win_length,\n window=self.window,\n center=True,\n pad_mode='reflect',\n normalized=False,\n return_complex=False\n )\n print(\"===== STFT params =====\")\n print(f\"n_fft : {self.n_fft}\")\n print(f\"hop_length : {self.hopsize}\")\n print(f\"win_length : {self.win_length}\")\n # print(f\"window : {self.window}\")\n print(\"========================\")\n\n \n # Convert to power spectrogram\n spec_x = np.sum(spec_x ** 2, axis=-1)\n\n\n \n # Apply Mel filterbank (ensuring shapes match)\n melspec = np.dot(self.mel_basis, spec_x.transpose(0,2,1)).transpose(1,0,2) \n # Log-scale and normalize\n melspec = np.log(melspec + 1e-5)\n melspec = (melspec + 4.5) / 5.\n \n return melspec","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.460969Z","iopub.status.idle":"2025-10-07T02:32:17.461524Z","shell.execute_reply":"2025-10-07T02:32:17.461370Z"},"_kg_hide-input":false},"outputs":[],"execution_count":null},{"cell_type":"code","source":"class EmotionalClassifier:\n def __init__(self, model_path, labels_path):\n self.mel = MelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320)\n self.init_model(model_path)\n self.load_lables(labels_path)\n\n def init_model(self, model_path: str):\n self.model = tf.lite.Interpreter(model_path=model_path)\n self.model.allocate_tensors()\n self.input_details = self.model.get_input_details() \n self.output_details = self.model.get_output_details() \n \n def load_lables(self, labels_path: str):\n with open(labels_path, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n lines = list(reader)\n \n self.labels = []\n self.ids = [] \n for i1 in range(1, len(lines)):\n id_ = lines[i1][1]\n label_ = lines[i1][2]\n self.ids.append(id_)\n self.labels.append(label_) \n\n def run(self, audio_path: str):\n # step 1: load lên thoi\n (waveform, _) = librosa.core.load(audio_path, sr=32000, mono=True)\n waveform = np.stack([waveform])\n spec = self.mel(waveform)\n \n if spec.shape[-1] > 400:\n spec = spec[:, :, :400]\n else:\n spec = np.pad(spec, ((0, 0), (0, 0), (0, 400 - spec.shape[-1])), mode='constant')\n \n spec = np.expand_dims(spec, axis=0)\n spec = spec.astype(np.float32)\n\n print(\"\\n\\n===========END===============\")\n print(\"=== Input details ===\")\n for d in self.input_details:\n print(d)\n \n print(\"=== Output details ===\")\n for d in self.output_details:\n print(d)\n\n \n \n # Run inference\n self.model.set_tensor(self.input_details[0]['index'], spec)\n self.model.invoke()\n output_data = self.model.get_tensor(self.output_details[0]['index'])\n\n print(output_data)\n return output_data\n\n def post_process(self, output_data):\n preds = softmax(output_data[0])\n sorted_indexes = np.argsort(preds)[::-1]\n result = {\n self.labels[sorted_indexes[k]]: float(preds[sorted_indexes[k]])\n for k in range(7)\n }\n top_category = {self.labels[sorted_indexes[0]]: float(preds[sorted_indexes[0]])}\n return result, top_category\n\nemotional_classifier = EmotionalClassifier(\n model_path=\"PATH/TO/THE/MODEL/*****.tflite\",\n labels_path=\"PATH/TO/THE/LABEL_PATH/*****.csv\"\n)\n\n# The label files should has the following format:\n # index\tmid\tdisplay_name\n # 0\t/m/09x0r\tAngry\n # 1\t/m/05zppz\tDisgust\n # 2\t/m/02zsn\tFear\n # 3\t/m/0ytgt\tHappy\n # 4\t/m/01h8n0\tneutral\n # 5\t/m/02qldy\tsad\n # 6\t/m/0261r1\tsurprise\n\n\noutput = emotional_classifier.run(\"/kaggle/input/123456789/ronaldo.mp3\")\nevents, top_category = emotional_classifier.post_process(output)\nevents, top_category","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.462361Z","iopub.status.idle":"2025-10-07T02:32:17.462677Z","shell.execute_reply":"2025-10-07T02:32:17.462540Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"### 6.3. Model loader","metadata":{}},{"cell_type":"code","source":"import matplotlib.pyplot as plt","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.463679Z","iopub.status.idle":"2025-10-07T02:32:17.463986Z","shell.execute_reply":"2025-10-07T02:32:17.463857Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def load_waveform_from_txt(path: str) -> np.ndarray:\n # Đọc dữ liệu, tách theo dấu phẩy\n with open(path, \"r\") as f:\n text = f.read()\n\n # Tách số và convert sang float\n values = [float(x) for x in text.replace(\"\\n\", \",\").split(\",\") if x.strip() != \"\"]\n waveform = np.array(values, dtype=np.float32)\n\n return waveform","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.464924Z","iopub.status.idle":"2025-10-07T02:32:17.465277Z","shell.execute_reply":"2025-10-07T02:32:17.465139Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# SPEECH-TO-TEXT","metadata":{}},{"cell_type":"markdown","source":"# 7. Speech-to-text Inference code","metadata":{}},{"cell_type":"markdown","source":"### 7.1. Install packages","metadata":{}},{"cell_type":"code","source":"!pip install -U openai-whisper -q\n!pip install yt-dlp pydub ffmpeg\n!sudo apt update && sudo apt install ffmpeg\nimport whisper","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.466176Z","iopub.status.idle":"2025-10-07T02:32:17.466489Z","shell.execute_reply":"2025-10-07T02:32:17.466357Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"### 7.2. Speech-to-text model loader","metadata":{}},{"cell_type":"code","source":"class SpeechToText:\n def __init__(self, model_name: str):\n self.model = whisper.load_model(model_name)\n\n def run(self, audio_path: str):\n result = self.model.transcribe(audio_path)\n return result[\"text\"]","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.467048Z","iopub.status.idle":"2025-10-07T02:32:17.467362Z","shell.execute_reply":"2025-10-07T02:32:17.467231Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# 9. Combination inference code","metadata":{}},{"cell_type":"markdown","source":"### 9.1. Text classification","metadata":{}},{"cell_type":"code","source":"def detect_threat(\n list_of_audio_path,\n speech_to_text,\n text_classifier,\n emotional_classifier,\n max_length_context_audio_buffer = 2,\n POTENTIAL_THREAT_THRESHOLD = 0.5\n):\n context_audio_buffer = []\n \n def text_detection(audio_path):\n new_text = speech_to_text.run(audio_path)\n print('text: ', new_text)\n\n context_audio_buffer.append(new_text)\n if len(context_audio_buffer) > max_length_context_audio_buffer:\n context_audio_buffer.pop(0)\n \n text_result = text_classifier.run(new_text)\n if text_result[1] == \"threat\":\n new_text_threat = text_result[0]\n \n if new_text_threat > POTENTIAL_THREAT_THRESHOLD:\n full_text = ' '.join(context_audio_buffer)\n context_threat = text_classifier.run(full_text)\n return context_threat\n else:\n return text_result\n else:\n new_text_threat = 1-text_result[0]\n return (new_text_threat, 'non-threat')\n \n def emotional_detection(audio_path):\n output = emotional_classifier.run(audio_path)\n events, top_category = emotional_classifier.post_process(output)\n return events, top_category\n \n for audio_path in list_of_audio_path:\n text_threat = text_detection(audio_path)\n\n emotional_threat = emotional_detection(audio_path)\n \n print(f'{audio_path}\\nText classifier: {text_threat}')\n print(f'Emotional classifier: {emotional_threat}')\n \n top_labels = str(list(emotional_threat[1].keys())[0])\n text_score = float(text_threat[0])\n emo = emotional_threat[0]\n\n # APPROACH 1\n # if text_score > 0.75:\n # print('*****************************************\\nFINAL THREAT SCORE: ', text_score)\n # elif text_score < 0.75 and text_score >= 0.5:\n # if top_labels in ['Angry', 'Fear']:\n # text_score += 0.25\n # elif top_labels in ['Disgust', 'sad']:\n # text_score += 0.1\n # print('*****************************************\\nFINAL THREAT SCORE: ', text_score)\n # else:\n # if top_labels in ['Angry', 'Fear']:\n # text_score = max(text_score+0.2, 0.5)\n # print('*****************************************\\nFINAL THREAT SCORE: ', text_score)\n # print('*****************************************\\n')\n\n # APPROACH 2\n # w0, w1, w2, w3, w4, w5, w6, w7 = 1, 0.3, 0.3, 0.25, 0.25, -0.05, -0.1, -0.05\n # score = text_score*w0 + emo['Angry']*w1 + emo['Fear']*w2 + emo['Disgust']*w3 + emo['sad']*w4 +\\\n # emo['neutral']*w5 + emo['Happy']*w6 + emo['surprise']*w7\n # print('*****************************************')\n # score = 1 if score > 1.0 else score\n # score = 0 if score < 0.0 else score\n # print('FINAL THREAT SCORE: ', score)\n # print('*****************************************\\n')\n\n # APPROACH 3\n # if text_score >= 0.75:\n # w0, w1, w2, w3, w4, w5, w6, w7 = 0.9, 0.25, 0.25, 0.15, 0.15, 0, -0.15, -0.1\n # elif 0.5 <= text_score < 0.75:\n # w0, w1, w2, w3, w4, w5, w6, w7 = 1, 0.30, 0.30, 0.25, 0.25, -0.05, -0.1, -0.05\n # else:\n # w0, w1, w2, w3, w4, w5, w6, w7 = 0.9, 0.35, 0.35, 0.3, 0.3, 0, -0.05, -0.025\n # score = text_score*w0 + emo['Angry']*w1 + emo['Fear']*w2 + emo['Disgust']*w3 + emo['sad']*w4 +\\\n # emo['neutral']*w5 + emo['Happy']*w6 + emo['surprise']*w7\n # print('*****************************************')\n # score = 1 if score > 1.0 else score\n # score = 0 if score < 0.0 else score\n # print('FINAL THREAT SCORE: ', score)\n # print('*****************************************\\n')\n\n # APPROACH 4\n \n # if text_score >= 0.75:\n # w0, w1, w2, w3, w4, w5, w6, w7 = 0.9, 0.25, 0.25, 0.15, 0.15, 0, 0, 0\n # elif 0.5 <= text_score < 0.75:\n # w0, w1, w2, w3, w4, w5, w6, w7 = 1, 0.5, 0.5, 0.3, 0.3, 0, 0, 0\n # else:\n # w0, w1, w2, w3, w4, w5, w6, w7 = 0.9, 0.8, 0.8, 0.5, 0.5, 0, 0, 0\n # score = text_score*w0 + emo['Angry']*w1 + emo['Fear']*w2 + emo['Disgust']*w3 + emo['sad']*w4 +\\\n # emo['neutral']*w5 + emo['Happy']*w6 + emo['surprise']*w7\n # print('*****************************************')\n # score = 1 if score > 1.0 else score\n # score = 0 if score < 0.0 else score\n # print('FINAL THREAT SCORE: ', score)\n # print('*****************************************\\n')\n\n # APROACH 5:\n def angry_fearful_linear_regression(x):\n if x > 1:\n return 0.25\n elif x < 0:\n return 0.8\n else:\n return 0.1*x**2 - 0.65*x + 0.8\n \n def digust_sad_linear_regression(x):\n if x > 1:\n return 0.15\n elif x < 0:\n return 0.5\n return 0.1*x**2 - 0.45*x + 0.5\n \n #weighting\n angry_fearful = angry_fearful_linear_regression(text_score)\n sad_disgust = digust_sad_linear_regression(text_score)\n\n text_weight = 1.0\n if text_score > 0.75 or text_score < 0.5: \n text_weight = 0.9\n \n w0, w1, w2, w3, w4, w5, w6, w7 = text_weight, angry_fearful, angry_fearful, sad_disgust, sad_disgust, 0, 0, 0\n score = text_score*w0 + emo['Angry']*w1 + emo['Fear']*w2 + emo['Disgust']*w3 + emo['sad']*w4 +\\\n emo['neutral']*w5 + emo['Happy']*w6 + emo['surprise']*w7\n\n print('*****************************************')\n score = 1 if score > 1.0 else score\n score = 0 if score < 0.0 else score\n print('FINAL THREAT SCORE: ', score)\n print('*****************************************\\n')\n return\n ","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.467833Z","iopub.status.idle":"2025-10-07T02:32:17.468126Z","shell.execute_reply":"2025-10-07T02:32:17.467986Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"### 9.2. Set up inference environment","metadata":{}},{"cell_type":"code","source":"text_classifier = text_classifier\nemotional_classifier = emotional_classifier\nspeech_to_text = SpeechToText(model_name=\"small\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-07T02:32:17.468580Z","iopub.status.idle":"2025-10-07T02:32:17.468858Z","shell.execute_reply":"2025-10-07T02:32:17.468735Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"### 9.3. Inference","metadata":{}}]}