Remove redundant code

997146918 2025-08-23 17:27:01 +08:00
parent 6e2b99438b
commit 5ba1d0dbdd
4 changed files with 7 additions and 566 deletions

View File

@@ -455,177 +455,7 @@ class DualAIDialogueEngine:
            print(f"⚠ 对话评分失败: {e}")
            return 0.0, "{}", f"评分失败: {str(e)}"

    def generate_character_prompt(self, character_name: str, context_info: List[Dict], dialogue_history: List[DialogueTurn],
                                  history_context_count: int = 3, context_info_count: int = 2) -> str:
        """为角色生成对话提示
        Args:
            character_name: 角色名称
            context_info: 相关上下文信息
            dialogue_history: 对话历史
            history_context_count: 使用的历史对话轮数默认3轮
            context_info_count: 使用的上下文信息数量默认2个
        """
        char_data = self.kb.character_data.get(character_name, {})
        # 基础角色设定
        prompt_parts = []
        prompt_parts.append(f"你是{character_name},具有以下设定:")
        if char_data.get('personality', {}).get('core_traits'):
            traits = ", ".join(char_data['personality']['core_traits'])
            prompt_parts.append(f"性格特点:{traits}")
        if char_data.get('speech_patterns', {}).get('sample_phrases'):
            phrases = char_data['speech_patterns']['sample_phrases'][:3]
            prompt_parts.append(f"说话风格示例:{'; '.join(phrases)}")
        # 当前情境
        if char_data.get('current_situation'):
            situation = char_data['current_situation']
            prompt_parts.append(f"当前状态:{situation.get('current_mood', '')}")
        # 相关世界观信息(可控制数量)
        if context_info:
            prompt_parts.append("相关背景信息:")
            for info in context_info[:context_info_count]:
                content = info['content'][:200] + "..." if len(info['content']) > 200 else info['content']
                prompt_parts.append(f"- {content}")
        # 对话历史(可控制数量)
        if dialogue_history:
            prompt_parts.append("最近的对话:")
            # 使用参数控制历史对话轮数
            history_to_use = dialogue_history[-history_context_count:] if history_context_count > 0 else []
            for turn in history_to_use:
                prompt_parts.append(f"{turn.speaker}: {turn.content}")
        prompt_parts.append("\n请根据角色设定和上下文生成符合角色特点的自然对话。回复应该在50-150字之间。")
        return "\n".join(prompt_parts)

    def generate_dialogue(self, session_id: str, current_speaker: str, topic_hint: str = "",
                          history_context_count: int = 3, context_info_count: int = 2) -> Tuple[str, List[str]]:
        """生成角色对话
        Args:
            session_id: 会话ID
            current_speaker: 当前说话者
            topic_hint: 话题提示
            history_context_count: 使用的历史对话轮数默认3轮
            context_info_count: 使用的上下文信息数量默认2个
        """
        # 获取对话历史
        dialogue_history = self.conv_mgr.get_conversation_history(session_id)
        # 构建搜索查询
        if dialogue_history:
            # 基于最近的对话内容(可控制数量)
            recent_turns = dialogue_history[-history_context_count:] if history_context_count > 0 else []
            recent_content = " ".join([turn.content for turn in recent_turns])
            search_query = recent_content + " " + topic_hint
        else:
            # 首次对话
            search_query = f"{current_speaker} {topic_hint} introduction greeting"
        # 搜索相关上下文
        context_info = self.kb.search_relevant_context(search_query, current_speaker, context_info_count)
        # 生成提示(使用参数控制上下文数量)
        prompt = self.generate_character_prompt(
            current_speaker,
            context_info,
            dialogue_history,
            history_context_count,
            context_info_count
        )
        # 生成对话 - 使用双模型系统
        try:
            # 检查是否为双模型对话系统
            if hasattr(self.llm_generator, 'generate_dual_character_dialogue'):
                # 使用双模型系统
                response = self.llm_generator.generate_dual_character_dialogue(
                    current_speaker,
                    prompt,
                    topic_hint or "请继续对话",
                    temperature=0.8,
                    max_new_tokens=150
                )
            else:
                # 兼容旧的单模型系统
                response = self.llm_generator.generate_character_dialogue(
                    current_speaker,
                    prompt,
                    topic_hint or "请继续对话",
                    temperature=0.8,
                    max_new_tokens=150
                )
            # 记录使用的上下文
            context_used = [f"{info['section']}.{info['subsection']}" for info in context_info[:context_info_count]]
            avg_relevance = sum(info['relevance_score'] for info in context_info[:context_info_count]) / len(context_info[:context_info_count]) if context_info else 0.0
            # 对对话进行评分
            if self.enable_scoring:
                dialogue_score, score_details, score_feedback = self.score_dialogue_turn(response, current_speaker, dialogue_history)
                print(f" [评分: {dialogue_score:.2f}] {score_feedback}")
            else:
                dialogue_score, score_details, score_feedback = 0.0, "{}", ""
            # 保存对话轮次(包含评分信息)
            self.conv_mgr.add_dialogue_turn(
                session_id, current_speaker, response, context_used, avg_relevance,
                dialogue_score, score_details, score_feedback
            )
            return response, context_used
        except Exception as e:
            print(f"✗ 对话生成失败: {e}")
            return f"[{current_speaker}暂时无法回应]", []

    def run_conversation_turn(self, session_id: str, characters: List[str], turns_count: int = 1, topic: str = "",
                              history_context_count: int = 3, context_info_count: int = 2):
        """运行对话轮次
        Args:
            session_id: 会话ID
            characters: 角色列表
            turns_count: 对话轮数
            topic: 对话主题
            history_context_count: 使用的历史对话轮数默认3轮
            context_info_count: 使用的上下文信息数量默认2个
        """
        results = []
        print(f" [上下文设置: 历史{history_context_count}轮, 信息{context_info_count}个]")
        for i in range(turns_count):
            for char in characters:
                response, context_used = self.generate_dialogue(
                    session_id,
                    char,
                    topic,
                    history_context_count,
                    context_info_count
                )
                results.append({
                    "speaker": char,
                    "content": response,
                    "context_used": context_used,
                    "turn": i + 1,
                    "context_settings": {
                        "history_count": history_context_count,
                        "context_info_count": context_info_count
                    }
                })
                print(f"{char}: {response}")
                # if context_used:
                #     print(f" [使用上下文: {', '.join(context_used)}]")
                print()
        return results

    def run_dual_model_conversation(self, session_id: str, topic: str = "", turns: int = 4,
                                    history_context_count: int = 3, context_info_count: int = 2):
@@ -713,232 +543,3 @@ class DualAIDialogueEngine:
        return conversation_results
# def main():
# """主函数 - 演示系统使用"""
# print("=== RAG增强双AI角色对话系统 ===")
# # 设置路径
# knowledge_dir = "./knowledge_base" # 包含世界观和角色文档的目录
# # 检查必要文件
# required_dirs = [knowledge_dir]
# for dir_path in required_dirs:
# if not os.path.exists(dir_path):
# print(f"✗ 目录不存在: {dir_path}")
# print("请确保以下文件存在:")
# print("- ./knowledge_base/worldview_template_coc.json")
# print("- ./knowledge_base/character_template_detective.json")
# print("- ./knowledge_base/character_template_professor.json")
# return
# try:
# # 初始化系统组件
# print("\n初始化系统...")
# kb = RAGKnowledgeBase(knowledge_dir)
# conv_mgr = ConversationManager()
# # 这里需要你的LLM生成器使用新的双模型对话系统
# from npc_dialogue_generator import DualModelDialogueGenerator
# base_model_path = '/mnt/g/Project02/AITrain/Qwen/Qwen3-4B' # 根据你的路径调整
# lora_model_path = './output/NPC_Dialogue_LoRA/final_model'
# if not os.path.exists(lora_model_path):
# lora_model_path = None
# # 创建双模型对话生成器
# if hasattr(kb, 'character_data') and len(kb.character_data) >= 2:
# print("✓ 使用knowledge_base角色数据创建双模型对话系统")
# # 获取前两个角色
# character_names = list(kb.character_data.keys())[:2]
# char1_name = character_names[0]
# char2_name = character_names[1]
# # 配置两个角色的模型
# character1_config = {
# "name": char1_name,
# "lora_path": lora_model_path, # 可以为每个角色设置不同的LoRA
# "character_data": kb.character_data[char1_name]
# }
# character2_config = {
# "name": char2_name,
# "lora_path": lora_model_path, # 可以为每个角色设置不同的LoRA
# "character_data": kb.character_data[char2_name]
# }
# llm_generator = DualModelDialogueGenerator(
# base_model_path,
# character1_config,
# character2_config
# )
# else:
# print("⚠ 角色数据不足,无法创建双模型对话系统")
# return
# # 创建对话引擎
# dialogue_engine = DualAIDialogueEngine(kb, conv_mgr, llm_generator)
# print("✓ 系统初始化完成")
# # 交互式菜单
# while True:
# print("\n" + "="*50)
# print("双AI角色对话系统")
# print("1. 创建新对话")
# print("2. 继续已有对话")
# print("3. 查看对话历史")
# print("4. 列出所有会话")
# print("0. 退出")
# print("="*50)
# choice = input("请选择操作: ").strip()
# if choice == '0':
# break
# elif choice == '1':
# # 创建新对话
# print(f"可用角色: {list(kb.character_data.keys())}")
# characters = input("请输入两个角色名称(用空格分隔): ").strip().split()
# if len(characters) != 2:
# print("❌ 请输入正好两个角色名称")
# continue
# worldview = kb.worldview_data.get('worldview_name', '未知世界观') if kb.worldview_data else '未知世界观'
# session_id = conv_mgr.create_session(characters, worldview)
# topic = input("请输入对话主题(可选): ").strip()
# turns = int(input("请输入对话轮次数量默认2: ").strip() or "2")
# # 历史上下文控制选项
# print("\n历史上下文设置:")
# history_count = input("使用历史对话轮数默认30表示不使用: ").strip()
# history_count = int(history_count) if history_count.isdigit() else 3
# context_info_count = input("使用上下文信息数量默认2: ").strip()
# context_info_count = int(context_info_count) if context_info_count.isdigit() else 2
# print(f"\n开始对话 - 会话ID: {session_id}")
# print(f"上下文设置: 历史{history_count}轮, 信息{context_info_count}个")
# # 询问是否使用双模型对话
# use_dual_model = input("是否使用双模型对话系统?(y/n默认y): ").strip().lower()
# if use_dual_model != 'n':
# print("使用双模型对话系统...")
# dialogue_engine.run_dual_model_conversation(session_id, topic, turns, history_count, context_info_count)
# else:
# print("使用传统对话系统...")
# dialogue_engine.run_conversation_turn(session_id, characters, turns, topic, history_count, context_info_count)
# elif choice == '2':
# # 继续已有对话
# sessions = conv_mgr.list_sessions()
# if not sessions:
# print("❌ 没有已有对话")
# continue
# print("已有会话:")
# for i, session in enumerate(sessions[:5]):
# chars = ", ".join(session['characters'])
# print(f"{i+1}. {session['session_id'][:8]}... ({chars}) - {session['last_update'][:16]}")
# try:
# idx = int(input("请选择会话编号: ").strip()) - 1
# if 0 <= idx < len(sessions):
# session = sessions[idx]
# session_id = session['session_id']
# characters = session['characters']
# # 显示最近的对话
# history = conv_mgr.get_conversation_history(session_id, 4)
# if history:
# print("\n最近的对话:")
# for turn in history:
# print(f"{turn.speaker}: {turn.content}")
# topic = input("请输入对话主题(可选): ").strip()
# turns = int(input("请输入对话轮次数量默认1: ").strip() or "1")
# # 历史上下文控制选项
# print("\n历史上下文设置:")
# history_count = input("使用历史对话轮数默认30表示不使用: ").strip()
# history_count = int(history_count) if history_count.isdigit() else 3
# context_info_count = input("使用上下文信息数量默认2: ").strip()
# context_info_count = int(context_info_count) if context_info_count.isdigit() else 2
# print(f"\n继续对话 - 会话ID: {session_id}")
# print(f"上下文设置: 历史{history_count}轮, 信息{context_info_count}个")
# # 询问是否使用双模型对话
# use_dual_model = input("是否使用双模型对话系统?(y/n默认y): ").strip().lower()
# if use_dual_model != 'n':
# print("使用双模型对话系统...")
# dialogue_engine.run_dual_model_conversation(session_id, topic, turns, history_count, context_info_count)
# else:
# print("使用传统对话系统...")
# dialogue_engine.run_conversation_turn(session_id, characters, turns, topic, history_count, context_info_count)
# else:
# print("❌ 无效的会话编号")
# except ValueError:
# print("❌ 请输入有效的数字")
# elif choice == '3':
# # 查看对话历史
# session_id = input("请输入会话ID前8位即可: ").strip()
# # 查找匹配的会话
# sessions = conv_mgr.list_sessions()
# matching_session = None
# for session in sessions:
# if session['session_id'].startswith(session_id):
# matching_session = session
# break
# if matching_session:
# full_session_id = matching_session['session_id']
# history = conv_mgr.get_conversation_history(full_session_id, 20)
# if history:
# print(f"\n对话历史 - {full_session_id}")
# print(f"角色: {', '.join(matching_session['characters'])}")
# print(f"世界观: {matching_session['worldview']}")
# print("-" * 50)
# for turn in history:
# print(f"[{turn.timestamp[:16]}] {turn.speaker}:")
# print(f" {turn.content}")
# if turn.context_used:
# print(f" 使用上下文: {', '.join(turn.context_used)}")
# print()
# else:
# print("该会话暂无对话历史")
# else:
# print("❌ 未找到匹配的会话")
# elif choice == '4':
# # 列出所有会话
# sessions = conv_mgr.list_sessions()
# if sessions:
# print(f"\n共有 {len(sessions)} 个对话会话:")
# for session in sessions:
# chars = ", ".join(session['characters'])
# print(f"ID: {session['session_id']}")
# print(f" 角色: {chars}")
# print(f" 世界观: {session['worldview']}")
# print(f" 最后更新: {session['last_update']}")
# print()
# else:
# print("暂无对话会话")
# else:
# print("❌ 无效选择")
# except Exception as e:
# print(f"✗ 系统运行出错: {e}")
# import traceback
# traceback.print_exc()
# if __name__ == '__main__':
# main()
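
The hunks above delete DualAIDialogueEngine's turn-based helpers (generate_character_prompt, generate_dialogue, run_conversation_turn) along with a commented-out interactive main(). For reference, a minimal sketch of how that removed turn-based interface was driven; the base model path is a placeholder and the LoRA model is omitted, everything else follows signatures shown in the deleted code:

# Minimal usage sketch of the API removed by this commit (not the project's
# own entry point). '/path/to/Qwen3-4B' is a placeholder checkpoint path.
from dual_ai_dialogue_system import RAGKnowledgeBase, ConversationManager, DualAIDialogueEngine
from npc_dialogue_generator import NPCDialogueGenerator

kb = RAGKnowledgeBase("./knowledge_base")
conv_mgr = ConversationManager()
llm_generator = NPCDialogueGenerator('/path/to/Qwen3-4B', None, kb.character_data)  # no LoRA

engine = DualAIDialogueEngine(kb, conv_mgr, llm_generator)
characters = list(kb.character_data.keys())[:2]
session_id = conv_mgr.create_session(characters, "克苏鲁的呼唤")

# Keep the last 3 turns of history and 2 retrieved context snippets per prompt,
# matching the defaults documented in the removed methods.
engine.run_conversation_turn(session_id, characters, turns_count=2, topic="最近发生的神秘事件",
                             history_context_count=3, context_info_count=2)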

View File

@@ -121,7 +121,7 @@ def show_character_info():
        except Exception as e:
            print(f"✗ 读取角色文件失败: {char_file} - {e}")
-def run_dialogue_system():
+def run_dialogue_system(enableScore: bool):
    """运行双AI对话系统"""
    print("\n" + "="*60)
    print("启动双AI角色对话系统")
@@ -191,7 +191,7 @@ def run_dialogue_system():
            kb,
            conv_mgr,
            dual_generator,
-            enable_scoring=True,
+            enable_scoring=enableScore,
            base_model_path=base_model_path
        )
@@ -260,68 +260,6 @@ def run_dialogue_system():
        import traceback
        traceback.print_exc()
def create_demo_scenario():
    """创建演示场景"""
    print("\n创建演示对话场景...")
    try:
        from dual_ai_dialogue_system import RAGKnowledgeBase, ConversationManager, DualAIDialogueEngine
        from npc_dialogue_generator import NPCDialogueGenerator
        # 初始化组件
        kb = RAGKnowledgeBase("./knowledge_base")
        conv_mgr = ConversationManager("./conversation_data/demo_conversations.db")
        # 检查模型路径
        base_model_path = '/mnt/e/AI/Project02/AITrain/Qwen/Qwen3-4B'
        lora_model_path = './output/NPC_Dialogue_LoRA/final_model'
        if not os.path.exists(base_model_path):
            print(f"✗ 基础模型路径不存在: {base_model_path}")
            print("请修改 main_controller.py 中的模型路径")
            return
        if not os.path.exists(lora_model_path):
            lora_model_path = None
            print("⚠ LoRA模型不存在使用基础模型")
        llm_generator = NPCDialogueGenerator(base_model_path, lora_model_path, kb.character_data)
        dialogue_engine = DualAIDialogueEngine(kb, conv_mgr, llm_generator)
        # 创建演示对话
        characters = ["维多利亚·布莱克伍德", "阿奇博尔德·韦恩"]
        worldview = "克苏鲁的呼唤"
        session_id = conv_mgr.create_session(characters, worldview)
        print(f"✓ 创建演示会话: {session_id}")
        # 运行几轮对话
        topic = "最近发生的神秘事件"
        print(f"\n开始演示对话 - 主题: {topic}")
        print("-" * 40)
        # 演示不同的历史上下文设置
        # print("演示1: 使用默认上下文设置历史3轮信息2个")
        # dialogue_engine.run_conversation_turn(session_id, characters, 6, topic)
        session_id = conv_mgr.create_session(characters, worldview)
        print(f"✓ 创建演示会话: {session_id}")
        print("\n演示3: 使用最少历史上下文历史1轮信息1个")
        dialogue_engine.run_conversation_turn(session_id, characters, 6, topic, 1, 10)
        session_id = conv_mgr.create_session(characters, worldview)
        print(f"✓ 创建演示会话: {session_id}")
        print("\n演示2: 使用更多历史上下文历史10轮信息10个")
        dialogue_engine.run_conversation_turn(session_id, characters, 6, topic, 5, 10)
        print(f"\n✓ 演示完成会话ID: {session_id}")
        print("你可以通过主对话系统继续这个对话")
    except Exception as e:
        print(f"✗ 演示场景创建失败: {e}")
        import traceback
        traceback.print_exc()

def analyze_model_performance():
    """分析模型性能"""
@@ -1333,8 +1271,8 @@ def main():
        print("主菜单 - 请选择操作:")
        print("1. 处理PDF世界观文档 (转换为RAG格式)")
        print("2. 查看角色设定信息")
-        print("3. 启动双AI对话系统 (支持双模型对话)")
+        print("3. 启动双AI对话系统 (开启ai打分)")
-        print("4. 创建演示对话场景")
+        print("4. 启动双AI对话系统 (关闭ai打分)")
        print("5. 系统状态检查")
        print("6. 查看对话评分统计")
        print("7. 模型性能分析与优化")
@@ -1357,10 +1295,10 @@ def main():
            show_character_info()
        elif choice == '3':
-            run_dialogue_system()
+            run_dialogue_system(enableScore = True)
        elif choice == '4':
-            create_demo_scenario()
+            run_dialogue_system(enableScore = False)
        elif choice == '5':
            show_system_status()

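The hunks above replace the demo-scenario menu entry with a second dialogue-system entry and thread a new enableScore flag through run_dialogue_system into DualAIDialogueEngine's enable_scoring parameter. A self-contained sketch of that flow, using a stand-in class rather than the real engine:

# Stand-in illustration of the enableScore flow added in this commit.
# _StubEngine is not part of the project; only the enable_scoring keyword
# mirrors the real constructor shown in the hunk above.
class _StubEngine:
    def __init__(self, enable_scoring: bool):
        self.enable_scoring = enable_scoring

def run_dialogue_system(enableScore: bool) -> _StubEngine:
    # main_controller.py builds kb / conv_mgr / dual_generator first;
    # here only the forwarding of the flag is shown.
    return _StubEngine(enable_scoring=enableScore)

# Menu choice 3 enables AI scoring, choice 4 disables it, as in the updated main().
assert run_dialogue_system(enableScore=True).enable_scoring is True
assert run_dialogue_system(enableScore=False).enable_scoring is False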
View File

@@ -471,101 +471,3 @@ class DualModelDialogueGenerator:
        """列出两个角色名称"""
        return [self.character1_config['name'], self.character2_config['name']]
# def main():
# """测试对话生成器"""
# # 配置路径
# base_model_path = '/mnt/g/Project02/AITrain/Qwen/Qwen3-8B-AWQ'
# lora_model_path = './output/NPC_Dialogue_LoRA/final_model' # 如果没有训练LoRA设为None
# # 检查LoRA模型是否存在
# if not os.path.exists(lora_model_path):
# print("LoRA模型不存在使用基础模型")
# lora_model_path = None
# # 创建对话生成器
# generator = NPCDialogueGenerator(base_model_path, lora_model_path)
# print("=== 游戏NPC角色对话生成器 ===")
# print(f"可用角色:{', '.join(generator.list_available_characters())}")
# # 测试单个角色对话生成
# print("\n=== 单角色对话测试 ===")
# test_scenarios = [
# {
# "character": "克莱恩",
# "context": "玩家向你咨询神秘学知识",
# "input": "请告诉我一些关于灵界的注意事项。"
# },
# {
# "character": "阿兹克",
# "context": "学生遇到了修炼瓶颈",
# "input": "导师,我在修炼中遇到了困难。"
# },
# {
# "character": "塔利姆",
# "context": "在俱乐部偶遇老朋友",
# "input": "好久不见,最近怎么样?"
# }
# ]
# for scenario in test_scenarios:
# print(f"\n--- {scenario['character']} ---")
# print(f"情境:{scenario['context']}")
# print(f"输入:{scenario['input']}")
# dialogue = generator.generate_character_dialogue(
# scenario["character"],
# scenario["context"],
# scenario["input"]
# )
# print(f"回复:{dialogue}")
# # 测试角色间对话
# print("\n=== 角色间对话测试 ===")
# conversation = generator.generate_dialogue_conversation(
# "克莱恩", "塔利姆", "最近遇到的神秘事件", turns=4
# )
# for turn in conversation:
# print(f"{turn['speaker']}{turn['dialogue']}")
# # 交互式对话模式
# print("\n=== 交互式对话模式 ===")
# print("输入格式:角色名 上下文 用户输入")
# print("例如:克莱恩 在俱乐部 请给我一些建议")
# print("输入'quit'退出")
# while True:
# try:
# user_command = input("\n请输入指令: ").strip()
# if user_command.lower() == 'quit':
# break
# parts = user_command.split(' ', 2)
# if len(parts) < 2:
# print("格式错误,请使用:角色名 上下文 [用户输入]")
# continue
# character = parts[0]
# context = parts[1]
# user_input = parts[2] if len(parts) > 2 else ""
# if character not in generator.list_available_characters():
# print(f"未知角色:{character}")
# print(f"可用角色:{', '.join(generator.list_available_characters())}")
# continue
# dialogue = generator.generate_character_dialogue(
# character, context, user_input
# )
# print(f"\n{character}{dialogue}")
# except KeyboardInterrupt:
# break
# except Exception as e:
# print(f"生成对话时出错:{e}")
# print("\n对话生成器已退出")
# if __name__ == '__main__':
# main()
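
The hunk above removes the commented-out manual test harness for the dialogue generators. A minimal sketch of exercising DualModelDialogueGenerator directly, following the constructor and generate_dual_character_dialogue signatures used earlier in this commit; the model path, LoRA settings and character data below are placeholders:

# Minimal sketch replacing the removed manual test harness; paths and
# character data are placeholders, not values from the repository.
from npc_dialogue_generator import DualModelDialogueGenerator

base_model_path = '/path/to/Qwen3-4B'  # placeholder, point at the local checkpoint
character1_config = {"name": "维多利亚·布莱克伍德", "lora_path": None, "character_data": {}}
character2_config = {"name": "阿奇博尔德·韦恩", "lora_path": None, "character_data": {}}

generator = DualModelDialogueGenerator(base_model_path, character1_config, character2_config)

reply = generator.generate_dual_character_dialogue(
    character1_config["name"],                      # speaker
    "你是维多利亚·布莱克伍德,具有以下设定:……",        # prompt normally assembled by DualAIDialogueEngine
    "最近发生的神秘事件",                            # topic hint
    temperature=0.8,
    max_new_tokens=150,
)
print(reply)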