From a5fd61fd255e1010788020bb44267e18ba84bb58 Mon Sep 17 00:00:00 2001
From: 997146918 <997146918@qq.com>
Date: Fri, 8 Aug 2025 19:46:19 +0800
Subject: [PATCH] Disable GPU memory check
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 AITrain/LLM.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/AITrain/LLM.py b/AITrain/LLM.py
index 43a0130..90441d0 100644
--- a/AITrain/LLM.py
+++ b/AITrain/LLM.py
@@ -27,7 +27,8 @@
 if platform.system() == "Windows":
     multiprocessing.set_start_method('spawn', force=True)
 os.environ['VLLM_USE_MODELSCOPE'] = 'True'
-
+os.environ['DEEPSEEK_API'] = 'sk-8238e4a0efa748208adb1bf6e9d441f2'
+os.environ['DEEPSEEK_BASE_URL'] = 'https://api.deepseek.com'
 
 class BaseModel:
     def __init__(self, path: str = '') -> None:
@@ -147,7 +148,7 @@ class DeepseekChat(BaseModel):
     def _chat_with_local(self, system_prompt: str, user_prompt: str, temperature: float, top_p: float,
                          top_k: int, min_p: float, max_tokens: int, max_model_len: int) -> str:
         # 检查GPU显存
-        self.check_gpu_memory()
+        #self.check_gpu_memory()
 
         # 只初始化一次LLM
         self._initialize_llm_if_needed(max_model_len)
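
Note: the patch only comments out the call site; the body of check_gpu_memory() is not part of this diff.
As a rough illustration of the kind of pre-flight check being disabled, the sketch below uses
torch.cuda.mem_get_info(). The reuse of the name check_gpu_memory, the min_free_gib threshold, and the
PyTorch/CUDA assumption are illustrative guesses, not the repository's actual implementation.

    # Hypothetical sketch only -- the real check_gpu_memory() in AITrain/LLM.py
    # is not shown in this patch and may differ. Assumes PyTorch with CUDA.
    import torch

    def check_gpu_memory(min_free_gib: float = 8.0) -> None:
        """Raise if no visible CUDA device has at least min_free_gib GiB free."""
        if not torch.cuda.is_available():
            raise RuntimeError("No CUDA device available for local vLLM inference.")
        for device_id in range(torch.cuda.device_count()):
            # mem_get_info() returns (free_bytes, total_bytes) for the device.
            free_bytes, _total_bytes = torch.cuda.mem_get_info(device_id)
            if free_bytes / 1024 ** 3 >= min_free_gib:
                return  # at least one device has enough headroom
        raise RuntimeError(
            f"No GPU has {min_free_gib:.1f} GiB free; free GPU memory or "
            "reduce max_model_len before loading the model."
        )

With the call commented out, a shortage of GPU memory would presumably surface later instead, as an
error raised by vLLM while the model is loaded in _initialize_llm_if_needed().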