Disable the GPU memory check

997146918 2025-08-08 19:46:19 +08:00
parent 54751fe89e
commit a5fd61fd25


@@ -27,7 +27,8 @@ if platform.system() == "Windows":
     multiprocessing.set_start_method('spawn', force=True)
 os.environ['VLLM_USE_MODELSCOPE'] = 'True'
+os.environ['DEEPSEEK_API'] = 'sk-8238e4a0efa748208adb1bf6e9d441f2'
+os.environ['DEEPSEEK_BASE_URL'] = 'https://api.deepseek.com'
 
 class BaseModel:
     def __init__(self, path: str = '') -> None:
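
The two new environment variables point at DeepSeek's OpenAI-compatible endpoint. The diff only sets them; how they are read back is not shown in this hunk. A minimal sketch of a plausible consumer, assuming the openai SDK and the public 'deepseek-chat' model name (both assumptions, not taken from this commit):

import os
from openai import OpenAI  # assumed dependency; DeepSeek serves an OpenAI-compatible API

# Read the values set above instead of hardcoding them at each call site.
client = OpenAI(
    api_key=os.environ['DEEPSEEK_API'],
    base_url=os.environ['DEEPSEEK_BASE_URL'],
)

response = client.chat.completions.create(
    model='deepseek-chat',  # assumed model name
    messages=[{'role': 'user', 'content': 'ping'}],
)
print(response.choices[0].message.content)

Setting the variables at import time, as the commit does, means any module imported afterward sees them without extra plumbing.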
@@ -147,7 +148,7 @@ class DeepseekChat(BaseModel):
     def _chat_with_local(self, system_prompt: str, user_prompt: str, temperature: float,
                          top_p: float, top_k: int, min_p: float, max_tokens: int, max_model_len: int) -> str:
         # Check GPU memory
-        self.check_gpu_memory()
+        #self.check_gpu_memory()
         # Initialize the LLM only once
         self._initialize_llm_if_needed(max_model_len)
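
The commit disables the pre-inference memory check by commenting out the call; check_gpu_memory itself is defined elsewhere in the file and not shown here. Purely as an illustration of what such a guard typically does, and of an opt-out flag as an alternative to editing the call site, a hypothetical sketch (the function body, the SKIP_GPU_MEMORY_CHECK flag, and the 2 GiB threshold are all assumptions):

import os
import torch

def check_gpu_memory(min_free_gib: float = 2.0) -> None:
    # Hypothetical guard: fail fast if the CUDA device lacks free memory.
    if os.environ.get('SKIP_GPU_MEMORY_CHECK') == '1':
        return  # opt-out flag instead of commenting the call out in code
    if not torch.cuda.is_available():
        return  # nothing to check on CPU-only hosts
    free_bytes, _total_bytes = torch.cuda.mem_get_info()
    if free_bytes < min_free_gib * 1024 ** 3:
        raise RuntimeError(
            f'Insufficient free GPU memory: {free_bytes / 1024 ** 3:.2f} GiB free, '
            f'{min_free_gib:.2f} GiB required'
        )

Gating the check behind an environment variable would keep the safety net available while still letting a deployment skip it, without touching the code path for each toggle.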