Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add Ollama bot integration and update configuration settings #2375

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
.vs
.wechaty/
__pycache__/
env/
venv*
*.pyc
config.json
Expand Down
116 changes: 66 additions & 50 deletions bot/bot_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
channel factory
"""
from common import const
from common.log import logger


def create_bot(bot_type):
    """
    Create a bot instance for the given bot type code.

    Each branch imports its bot module lazily, so only the dependencies of
    the selected bot need to be installed.

    :param bot_type: bot type code (one of the const.* bot identifiers)
    :return: bot instance
    :raises RuntimeError: if bot_type matches no known model type
    """
    logger.info(f"正在连接AI模型: {bot_type}")

    try:
        if bot_type == const.BAIDU:
            # Baidu Unit was replaced by the Baidu Wenxin (ERNIE) chat API.
            # from bot.baidu.baidu_unit_bot import BaiduUnitBot
            # return BaiduUnitBot()
            from bot.baidu.baidu_wenxin import BaiduWenxinBot
            logger.info("已连接到百度文心千帆模型")
            return BaiduWenxinBot()

        elif bot_type == const.CHATGPT:
            # ChatGPT web-style chat-completions interface
            from bot.chatgpt.chat_gpt_bot import ChatGPTBot
            logger.info("已连接到ChatGPT网页版")
            return ChatGPTBot()

        elif bot_type == const.OPEN_AI:
            # Official OpenAI completion API
            from bot.openai.open_ai_bot import OpenAIBot
            logger.info("已连接到OpenAI API")
            return OpenAIBot()

        elif bot_type == const.CHATGPTONAZURE:
            # Azure ChatGPT service
            # https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/
            from bot.chatgpt.chat_gpt_bot import AzureChatGPTBot
            return AzureChatGPTBot()

        elif bot_type == const.XUNFEI:
            from bot.xunfei.xunfei_spark_bot import XunFeiBot
            return XunFeiBot()

        elif bot_type == const.LINKAI:
            from bot.linkai.link_ai_bot import LinkAIBot
            return LinkAIBot()

        elif bot_type == const.CLAUDEAI:
            from bot.claude.claude_ai_bot import ClaudeAIBot
            return ClaudeAIBot()

        elif bot_type == const.CLAUDEAPI:
            from bot.claudeapi.claude_api_bot import ClaudeAPIBot
            return ClaudeAPIBot()

        elif bot_type == const.QWEN:
            from bot.ali.ali_qwen_bot import AliQwenBot
            return AliQwenBot()

        elif bot_type == const.QWEN_DASHSCOPE:
            from bot.dashscope.dashscope_bot import DashscopeBot
            return DashscopeBot()

        elif bot_type == const.GEMINI:
            from bot.gemini.google_gemini_bot import GoogleGeminiBot
            return GoogleGeminiBot()

        elif bot_type == const.ZHIPU_AI:
            from bot.zhipuai.zhipuai_bot import ZHIPUAIBot
            return ZHIPUAIBot()

        elif bot_type == const.MOONSHOT:
            from bot.moonshot.moonshot_bot import MoonshotBot
            return MoonshotBot()

        elif bot_type == const.MiniMax:
            from bot.minimax.minimax_bot import MinimaxBot
            return MinimaxBot()

        elif bot_type == const.OLLAMA:
            from bot.ollama.ollama_bot import OllamaBot
            logger.info("已连接到Ollama本地模型")
            return OllamaBot()

    except Exception as e:
        # Import or construction of the selected bot failed: log it, then
        # re-raise with a bare `raise` so the original traceback is kept
        # (`raise e` would reset the raise site to this line).
        logger.error(f"连接AI模型 {bot_type} 失败: {str(e)}")
        raise

    # Reached only when no branch matched. Raising outside the try avoids
    # catching our own RuntimeError and logging the failure twice.
    logger.error(f"未知的AI模型类型: {bot_type}")
    raise RuntimeError(f"未知的AI模型类型: {bot_type}")
93 changes: 93 additions & 0 deletions bot/ollama/ollama_bot.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
"""
Ollama bot
"""
import json

import requests

from bot.bot import Bot
from bridge.reply import Reply, ReplyType
from common import const
from common.log import logger
from config import conf


class OllamaBot(Bot):
    """Bot backed by a local Ollama server's /api/chat endpoint."""

    def __init__(self):
        super().__init__()
        # Read the "ollama" section of the config once; fall back to the
        # standard local Ollama defaults when keys are absent.
        ollama_conf = conf().get("ollama", {})
        self.base_url = ollama_conf.get("base_url", "http://localhost:11434")
        self.model = ollama_conf.get("model", "llama2")
        # Optional bearer token, for proxied/authenticated Ollama deployments.
        self.api_key = ollama_conf.get("api_key", "")

    def reply(self, query, context=None):
        """
        Generate a reply by calling the Ollama chat API.

        :param query: the user's input message
        :param context: optional context; if it provides context["messages"],
            those are prepended as conversation history — TODO confirm the
            callers actually populate this key
        :return: Reply with ReplyType.TEXT on success, ReplyType.ERROR on
            HTTP failure or exception
        """
        try:
            headers = {
                "Content-Type": "application/json"
            }
            if self.api_key:
                headers["Authorization"] = f"Bearer {self.api_key}"

            # Build the message history: prior context (if any) + new query.
            messages = []
            if context and context.get("messages"):
                messages.extend(context.get("messages"))
            messages.append({
                "role": "user",
                "content": query
            })

            # Prepare the request payload (non-streaming).
            data = {
                "model": self.model,
                "messages": messages,
                "stream": False
            }

            # Log the request, redacting the Authorization header so the
            # API key never lands in the logs.
            safe_headers = {
                k: ("Bearer ***" if k == "Authorization" else v)
                for k, v in headers.items()
            }
            logger.info(f"Ollama API 请求URL: {self.base_url}/api/chat")
            logger.info(f"Ollama API 请求头: {json.dumps(safe_headers, ensure_ascii=False, indent=2)}")
            logger.info(f"Ollama API 请求数据: {json.dumps(data, ensure_ascii=False, indent=2)}")

            # Send the request (generation can be slow on local hardware,
            # hence the generous timeout).
            response = requests.post(
                f"{self.base_url}/api/chat",
                headers=headers,
                json=data,
                timeout=120
            )

            logger.info(f"Ollama API 响应状态码: {response.status_code}")
            logger.info(f"Ollama API 响应头: {json.dumps(dict(response.headers), ensure_ascii=False, indent=2)}")

            if response.status_code == 200:
                resp_json = response.json()
                logger.info(f"Ollama API 响应内容: {json.dumps(resp_json, ensure_ascii=False, indent=2)}")
                return Reply(
                    ReplyType.TEXT,
                    resp_json.get("message", {}).get("content", "")
                )

            error_msg = f"Ollama API 请求失败,状态码:{response.status_code}, 响应:{response.text}"
            logger.error(error_msg)
            # Return a Reply (not a bare string) so every code path hands
            # callers the same type.
            return Reply(ReplyType.ERROR, error_msg)

        except Exception as e:
            logger.error(f"Ollama API 异常:{e}")
            return Reply(ReplyType.ERROR, f"Ollama API 异常:{e}")

    def reply_text(self, query, context=None):
        """
        Text-only reply variant; delegates to reply().

        :param query: the user's input message
        :param context: optional context forwarded to reply()
        :return: Reply produced by reply()
        """
        return self.reply(query, context)
3 changes: 3 additions & 0 deletions common/const.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,3 +97,6 @@
# channel type codes
FEISHU = "feishu"
DINGTALK = "dingtalk"

# bot type code for a locally hosted Ollama server
OLLAMA = "ollama"
77 changes: 41 additions & 36 deletions config-template.json
Original file line number Diff line number Diff line change
@@ -1,37 +1,42 @@
{
"channel_type": "wx",
"model": "",
"open_ai_api_key": "YOUR API KEY",
"claude_api_key": "YOUR API KEY",
"text_to_image": "dall-e-2",
"voice_to_text": "openai",
"text_to_voice": "openai",
"proxy": "",
"hot_reload": false,
"single_chat_prefix": [
"bot",
"@bot"
],
"single_chat_reply_prefix": "[bot] ",
"group_chat_prefix": [
"@bot"
],
"group_name_white_list": [
"ChatGPT测试群",
"ChatGPT测试群2"
],
"image_create_prefix": [
"画"
],
"speech_recognition": true,
"group_speech_recognition": false,
"voice_reply_voice": false,
"conversation_max_tokens": 2500,
"expires_in_seconds": 3600,
"character_desc": "你是基于大语言模型的AI智能助手,旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
"temperature": 0.7,
"subscribe_msg": "感谢您的关注!\n这里是AI智能助手,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help 查看详细指令。",
"use_linkai": false,
"linkai_api_key": "",
"linkai_app_code": ""
}
"channel_type": "wx",
"model": "",
"bot_type": "ollama",
"open_ai_api_key": "YOUR API KEY",
"claude_api_key": "YOUR API KEY",
"text_to_image": "dall-e-2",
"voice_to_text": "openai",
"text_to_voice": "openai",
"proxy": "",
"hot_reload": false,
"single_chat_prefix": [
"bot",
"@bot"
],
"single_chat_reply_prefix": "[bot] ",
"group_chat_prefix": [
"@bot"
],
"group_name_white_list": [
"ChatGPT测试群",
"ChatGPT测试群2"
],
"image_create_prefix": [
"画"
],
"speech_recognition": true,
"group_speech_recognition": false,
"voice_reply_voice": false,
"conversation_max_tokens": 2500,
"expires_in_seconds": 3600,
"character_desc": "你是基于大语言模型的AI智能助手,旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
"temperature": 0.7,
"subscribe_msg": "感谢您的关注!\n这里是AI智能助手,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help 查看详细指令。",
"use_linkai": false,
"linkai_api_key": "",
"linkai_app_code": "",
"ollama": {
"model": "llama3.2",
"base_url": "http://localhost:11434"
}
}
13 changes: 8 additions & 5 deletions config.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
# encoding:utf-8

import copy
import json
import logging
import os
import pickle
import copy

from common.log import logger

Expand Down Expand Up @@ -45,7 +45,7 @@
# Azure OpenAI DALL-E API 配置, 当use_azure_chatgpt为true时,用于将文字回复的资源和Dall-E的资源分开.
"azure_openai_dalle_api_base": "", # [可选] azure openai 用于回复图片的资源 endpoint,默认使用 open_ai_api_base
"azure_openai_dalle_api_key": "", # [可选] azure openai 用于回复图片的资源 key,默认使用 open_ai_api_key
"azure_openai_dalle_deployment_id":"", # [可选] azure openai 用于回复图片的资源 deployment id,默认使用 text_to_image
"azure_openai_dalle_deployment_id": "", # [可选] azure openai 用于回复图片的资源 deployment id,默认使用 text_to_image
"image_proxy": True, # 是否需要图片代理,国内访问LinkAI时需要
"image_create_prefix": ["画", "看", "找"], # 开启图片回复的前缀
"concurrency_in_session": 1, # 同一会话最多有多少条消息在处理中,大于1可能乱序
Expand Down Expand Up @@ -149,10 +149,10 @@
"feishu_token": "", # 飞书 verification token
"feishu_bot_name": "", # 飞书机器人的名字
# 钉钉配置
"dingtalk_client_id": "", # 钉钉机器人Client ID
"dingtalk_client_id": "", # 钉钉机器人Client ID
"dingtalk_client_secret": "", # 钉钉机器人Client Secret
"dingtalk_card_enabled": False,

# chatgpt指令自定义触发词
"clear_memory_commands": ["#清除记忆"], # 重置会话指令,必须以#开头
# channel配置
Expand All @@ -179,7 +179,10 @@
"Minimax_api_key": "",
"Minimax_group_id": "",
"Minimax_base_url": "",
}
"ollama": {
"model": "llama3.2",
"base_url": "http://localhost:11434"
}}


class Config(dict):
Expand Down