diff --git a/docker/llm/serving/cpu/docker/model_adapter.py.patch b/docker/llm/serving/cpu/docker/model_adapter.py.patch
index 2957468d6e0..b9a68a3a54d 100644
--- a/docker/llm/serving/cpu/docker/model_adapter.py.patch
+++ b/docker/llm/serving/cpu/docker/model_adapter.py.patch
@@ -1,11 +1,11 @@
---- model_adapter.py.old	2024-01-24 01:56:23.903144335 +0000
-+++ model_adapter.py	2024-01-24 01:59:22.605062765 +0000
-@@ -1346,15 +1346,17 @@
+--- model_adapter.py.old	2024-03-05 15:08:47.169275336 +0800
++++ model_adapter.py	2024-03-05 15:10:13.434703674 +0800
+@@ -1690,15 +1690,17 @@
          )
          # NOTE: if you use the old version of model file, please remove the comments below
          # config.use_flash_attn = False
--        config.fp16 = True
-+        # config.fp16 = True
+-        self.float_set(config, "fp16")
++        # self.float_set(config, "fp16")
          generation_config = GenerationConfig.from_pretrained(
              model_path, trust_remote_code=True
          )
diff --git a/python/llm/setup.py b/python/llm/setup.py
index b9bb0317566..6880a745d40 100644
--- a/python/llm/setup.py
+++ b/python/llm/setup.py
@@ -53,7 +53,7 @@
                'transformers == 4.31.0', 'sentencepiece', 'tokenizers == 0.13.3',
                # TODO: Support accelerate 0.22.0
                'accelerate == 0.21.0', 'tabulate']
-SERVING_DEP = ['fschat[model_worker, webui] == 0.2.28', 'protobuf']
+SERVING_DEP = ['fschat[model_worker, webui] == 0.2.36', 'protobuf']
 windows_binarys = [
     "llama.dll",
     "gptneox.dll",