From f7661b5b0adbf08fba360ec047f880c3565a696e Mon Sep 17 00:00:00 2001
From: 997146918 <997146918@qq.com>
Date: Tue, 1 Jul 2025 16:16:32 +0800
Subject: [PATCH] =?UTF-8?q?=E4=BF=AE=E6=94=B9ollama=20api=E6=8E=A5?=
=?UTF-8?q?=E5=8F=A3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
AIGC/main.py | 80 +++++++++++++++++++++++++++++++---------------------
1 file changed, 48 insertions(+), 32 deletions(-)
diff --git a/AIGC/main.py b/AIGC/main.py
index fe84656..96330e8 100644
--- a/AIGC/main.py
+++ b/AIGC/main.py
@@ -138,30 +138,42 @@ def run_webserver():
log_level="info"
)
-async def generateAIChat(prompt: str, websocket: WebSocket):
+async def generateAIChat(promptStr: str, websocket: WebSocket| None = None):
#动态标识吗 防止重复输入导致的结果重复
dynamic_token = str(int(time.time() % 1000))
- prompt = f"""
+ promptStr = f"""
[动态标识码:{dynamic_token}]
- """ + prompt
- logger.log(logging.INFO, "prompt:" + prompt)
+ """ + promptStr
+ logger.log(logging.INFO, "prompt:" + promptStr)
starttime = time.time()
receivemessage=[
- {"role": "system", "content": prompt}
+ {"role": "system", "content": promptStr}
]
try:
- response = ollamaClient.chat(
+ # response = ollamaClient.chat(
+ # model = args.model,
+ # stream = False,
+ # messages = receivemessage,
+ # options={
+ # "temperature": random.uniform(1.0, 1.5),
+ # "repeat_penalty": 1.2, # 抑制重复
+ # "top_p": random.uniform(0.7, 0.95),
+ # "num_ctx": 4096, # 上下文长度
+ # "seed": int(time.time() * 1000) % 1000000
+ # }
+ # )
+ response = ollamaClient.generate(
model = args.model,
stream = False,
- messages = receivemessage,
+ prompt = promptStr,
options={
"temperature": random.uniform(1.0, 1.5),
"repeat_penalty": 1.2, # 抑制重复
"top_p": random.uniform(0.7, 0.95),
"num_ctx": 4096, # 上下文长度
- "seed": int(time.time() * 1000) % 1000000
}
)
+
except ResponseError as e:
if e.status_code == 503:
print("🔄 服务不可用,5秒后重试...")
@@ -170,9 +182,11 @@ async def generateAIChat(prompt: str, websocket: WebSocket):
print(f"🔥 未预料错误: {str(e)}")
return await senddata(websocket, -1, messages=["未预料错误"])
logger.log(logging.INFO, "接口调用耗时 :" + str(time.time() - starttime))
- logger.log(logging.INFO, "AI生成" + response['message']['content'])
+ #aiResponse = response['message']['content']
+ aiResponse = response['response']
+ logger.log(logging.INFO, "AI生成" + aiResponse)
#处理ai输出内容
- think_remove_text = re.sub(r'.*?', '', response['message']['content'], flags=re.DOTALL)
+    think_remove_text = re.sub(r'<think>.*?</think>', '', aiResponse, flags=re.DOTALL)
pattern = r".*(.*?)" # .* 吞掉前面所有字符,定位最后一组
match = re.search(pattern, think_remove_text, re.DOTALL)
@@ -220,29 +234,31 @@ if __name__ == "__main__":
server_thread.daemon = True # 设为守护线程(主程序退出时自动终止)
server_thread.start()
- ## Test
- # generateAIChat(f"""
- # 你是一个游戏NPC对话生成器。请严格按以下要求生成两个路人NPC(A和B)的日常对话:
- # 1. 生成【2轮完整对话】,每轮包含双方各一次发言(共4句)
- # 2. 对话场景:中世纪奇幻小镇的日常场景(如市场/酒馆/街道)
- # 3. 角色设定:
- # - NPC A:随机职业(铁匠/农夫/商人/卫兵等)
- # - NPC B:随机职业(不同于A)
- # 4. 对话要求:
- # * 每轮对话需自然衔接,体现生活细节
- # * 避免任务指引或玩家交互内容
- # * 结尾保持对话未完成感
- # 5. 输出格式(严格遵循,):
- # ---
- # A:[第一轮发言]
- # B:[第一轮回应]
- # A:[第二轮发言]
- # B:[第二轮回应]
- # ---
- # """
-
- # )
+ # Test
+ asyncio.run(
+ generateAIChat(promptStr = f"""
+ 你是一个游戏NPC对话生成器。请严格按以下要求生成两个路人NPC(A和B)的日常对话:
+ 1. 生成【2轮完整对话】,每轮包含双方各一次发言(共4句)
+ 2. 对话场景:中世纪奇幻小镇的日常场景(如市场/酒馆/街道)
+ 3. 角色设定:
+ - NPC A:随机职业(铁匠/农夫/商人/卫兵等)
+ - NPC B:随机职业(不同于A)
+ 4. 对话要求:
+ * 每轮对话需自然衔接,体现生活细节
+ * 避免任务指引或玩家交互内容
+ * 结尾保持对话未完成感
+ 5. 输出格式(严格遵循,):
+ ---
+ A:[第一轮发言]
+ B:[第一轮回应]
+ A:[第二轮发言]
+ B:[第二轮回应]
+ ---
+ """
+ )
+ )
+
try:
# 主线程永久挂起(监听退出信号)
while True: