From 271ed0b7e0c4900272848fa6c0cfe021c501fbab Mon Sep 17 00:00:00 2001
From: wood chen <95951386+woodchen-ink@users.noreply.github.com>
Date: Sun, 28 Jan 2024 01:50:05 +0800
Subject: [PATCH] =?UTF-8?q?=E6=81=A2=E5=A4=8D=E8=A7=92=E8=89=B2?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 code/config.yaml                     | 38 -----------
 code/handlers/card_common_action.go  |  3 +-
 code/handlers/card_role_action.go    | 75 ++++++++++++++++++++++
 code/handlers/event_common_action.go | 43 ++++++++++++-
 code/handlers/handler.go             |  2 +
 code/handlers/msg.go                 | 96 ++++++++++++++++++++++++++--
 code/initialization/config.go        |  5 +-
 code/services/openai/gpt3.go         | 25 +++++++-
 8 files changed, 235 insertions(+), 52 deletions(-)
 delete mode 100644 code/config.yaml
 create mode 100644 code/handlers/card_role_action.go

diff --git a/code/config.yaml b/code/config.yaml
deleted file mode 100644
index 8df530b..0000000
--- a/code/config.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# 是否启用日志。
-ENABLE_LOG: true
-# 飞书
-APP_ID: cli_axxx
-APP_SECRET: xxx
-APP_ENCRYPT_KEY: xxx
-APP_VERIFICATION_TOKEN: xxx
-# 请确保和飞书应用管理平台中的设置一致。这里建议直接用 Feishu-OpenAI-Stream-Chatbot 作为机器人名称,这样的话,如果你有多个bot就好区分
-BOT_NAME: xxx
-# openAI key 支持负载均衡 可以填写多个key 用逗号分隔
-OPENAI_KEY: sk-xxx,sk-xxx,sk-xxx
-# 服务器配置
-HTTP_PORT: 9000
-HTTPS_PORT: 9001
-USE_HTTPS: false
-CERT_FILE: cert.pem
-KEY_FILE: key.pem
-# openai 地址, 一般不需要修改, 除非你有自己的反向代理
-API_URL: https://oapi.czl.net
-# 代理设置, 例如 "http://127.0.0.1:7890", ""代表不使用代理
-HTTP_PROXY: ""
-# 访问OpenAi的 普通 Http请求的超时时间,单位秒,不配置的话默认为 550 秒
-OPENAI_HTTP_CLIENT_TIMEOUT:
-# openai 指定模型, 更多见 https://platform.openai.com/docs/models/model-endpoint-compatibility 中 /v1/chat/completions
-OPENAI_MODEL: gpt-3.5-turbo
-
-# AZURE OPENAI
-AZURE_ON: false # set to true to use Azure rather than OpenAI
-AZURE_API_VERSION: 2023-03-15-preview # 2023-03-15-preview or 2022-12-01 refer https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#completions
-AZURE_RESOURCE_NAME: xxxx # you can find in endpoint url. Usually looks like https://{RESOURCE_NAME}.openai.azure.com
-AZURE_DEPLOYMENT_NAME: xxxx # usually looks like ...openai.azure.com/openai/deployments/{DEPLOYMENT_NAME}/chat/completions.
-AZURE_OPENAI_TOKEN: xxxx # Authentication key. We can use Azure Active Directory Authentication(TBD).
-
-## 访问控制
-# 是否启用访问控制。默认不启用。
-ACCESS_CONTROL_ENABLE: false
-# 每个用户每天最多问多少个问题。默认为不限制. 配置成为小于等于0表示不限制。
-ACCESS_CONTROL_MAX_COUNT_PER_USER_PER_DAY: 0
diff --git a/code/handlers/card_common_action.go b/code/handlers/card_common_action.go
index 6af2e63..0ebbd16 100644
--- a/code/handlers/card_common_action.go
+++ b/code/handlers/card_common_action.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-
 	larkcard "github.com/larksuite/oapi-sdk-go/v3/card"
 )
 
@@ -21,6 +20,8 @@ func NewCardHandler(m MessageHandler) CardHandlerFunc {
 		NewPicResolutionHandler,
 		NewPicTextMoreHandler,
 		NewPicModeChangeHandler,
+		NewRoleTagCardHandler,
+		NewRoleCardHandler,
 	}
 
 	return func(ctx context.Context, cardAction *larkcard.CardAction) (interface{}, error) {
diff --git a/code/handlers/card_role_action.go b/code/handlers/card_role_action.go
new file mode 100644
index 0000000..bb81156
--- /dev/null
+++ b/code/handlers/card_role_action.go
@@ -0,0 +1,75 @@
+package handlers
+
+import (
+	"context"
+	larkcard "github.com/larksuite/oapi-sdk-go/v3/card"
+	"start-feishubot/initialization"
+	"start-feishubot/services"
+	"start-feishubot/services/openai"
+)
+
+func NewRoleTagCardHandler(cardMsg CardMsg,
+	m MessageHandler) CardHandlerFunc {
+	return func(ctx context.Context, cardAction *larkcard.CardAction) (interface{}, error) {
+
+		if cardMsg.Kind == RoleTagsChooseKind {
+			newCard, err, done := CommonProcessRoleTag(cardMsg, cardAction,
+				m.sessionCache)
+			if done {
+				return newCard, err
+			}
+			return nil, nil
+		}
+		return nil, ErrNextHandler
+	}
+}
+
+func NewRoleCardHandler(cardMsg CardMsg,
+	m MessageHandler) CardHandlerFunc {
+	return func(ctx context.Context, cardAction *larkcard.CardAction) (interface{}, error) {
+
+		if cardMsg.Kind == RoleChooseKind {
+			newCard, err, done := CommonProcessRole(cardMsg, cardAction,
+				m.sessionCache)
+			if done {
+				return newCard, err
+			}
+			return nil, nil
+		}
+		return nil, ErrNextHandler
+	}
+}
+
+func CommonProcessRoleTag(msg CardMsg, cardAction *larkcard.CardAction,
+	cache services.SessionServiceCacheInterface) (interface{},
+	error, bool) {
+	option := cardAction.Action.Option
+	//replyMsg(context.Background(), "已选择tag:"+option,
+	//	&msg.MsgId)
+	roles := initialization.GetTitleListByTag(option)
+	//fmt.Printf("roles: %s", roles)
+	SendRoleListCard(context.Background(), &msg.SessionId,
+		&msg.MsgId, option, *roles)
+	return nil, nil, true
+}
+
+func CommonProcessRole(msg CardMsg, cardAction *larkcard.CardAction,
+	cache services.SessionServiceCacheInterface) (interface{},
+	error, bool) {
+	option := cardAction.Action.Option
+	contentByTitle, error := initialization.GetFirstRoleContentByTitle(option)
+	if error != nil {
+		return nil, error, true
+	}
+	cache.Clear(msg.SessionId)
+	systemMsg := append([]openai.Messages{}, openai.Messages{
+		Role: "system", Content: contentByTitle,
+	})
+	cache.SetMsg(msg.SessionId, systemMsg)
+	//pp.Println("systemMsg: ", systemMsg)
+	sendSystemInstructionCard(context.Background(), &msg.SessionId,
+		&msg.MsgId, contentByTitle)
+	//replyMsg(context.Background(), "已选择角色:"+contentByTitle,
+	//	&msg.MsgId)
+	return nil, nil, true
+}
diff --git a/code/handlers/event_common_action.go b/code/handlers/event_common_action.go
index 631f002..ab5e875 100644
--- a/code/handlers/event_common_action.go
+++ b/code/handlers/event_common_action.go
@@ -3,9 +3,10 @@ package handlers
 import (
 	"context"
 	"fmt"
-	"start-feishubot/utils"
-
 	larkim "github.com/larksuite/oapi-sdk-go/v3/service/im/v1"
+	"start-feishubot/initialization"
+	"start-feishubot/services/openai"
+	"start-feishubot/utils"
 )
 
 type MsgInfo struct {
@@ -85,6 +86,24 @@ func (*ClearAction) Execute(a *ActionInfo) bool {
 	return true
 }
 
+type RolePlayAction struct { /*角色扮演*/
+}
+
+func (*RolePlayAction) Execute(a *ActionInfo) bool {
+	if system, foundSystem := utils.EitherCutPrefix(a.info.qParsed,
+		"/system ", "角色扮演 "); foundSystem {
+		a.handler.sessionCache.Clear(*a.info.sessionId)
+		systemMsg := append([]openai.Messages{}, openai.Messages{
+			Role: "system", Content: system,
+		})
+		a.handler.sessionCache.SetMsg(*a.info.sessionId, systemMsg)
+		sendSystemInstructionCard(*a.ctx, a.info.sessionId,
+			a.info.msgId, system)
+		return false
+	}
+	return true
+}
+
 type HelpAction struct { /*帮助*/
 }
 
@@ -113,3 +132,23 @@ func (*BalanceAction) Execute(a *ActionInfo) bool {
 	}
 	return true
 }
+
+type RoleListAction struct { /*角色列表*/
+}
+
+func (*RoleListAction) Execute(a *ActionInfo) bool {
+	if _, foundSystem := utils.EitherTrimEqual(a.info.qParsed,
+		"/roles", "角色列表"); foundSystem {
+		//a.handler.sessionCache.Clear(*a.info.sessionId)
+		//systemMsg := append([]openai.Messages{}, openai.Messages{
+		//	Role: "system", Content: system,
+		//})
+		//a.handler.sessionCache.SetMsg(*a.info.sessionId, systemMsg)
+		//sendSystemInstructionCard(*a.ctx, a.info.sessionId,
+		//	a.info.msgId, system)
+		tags := initialization.GetAllUniqueTags()
+		SendRoleTagsCard(*a.ctx, a.info.sessionId, a.info.msgId, *tags)
+		return false
+	}
+	return true
+}
diff --git a/code/handlers/handler.go b/code/handlers/handler.go
index 1defddf..d9a0ea8 100644
--- a/code/handlers/handler.go
+++ b/code/handlers/handler.go
@@ -95,7 +95,9 @@ func (m MessageHandler) msgReceivedHandler(ctx context.Context, event *larkim.P2
 		&ProcessMentionAction{}, //判断机器人是否应该被调用
 		&EmptyAction{},          //空消息处理
 		&ClearAction{},          //清除消息处理
+		&RoleListAction{},       //角色列表处理
 		&HelpAction{},           //帮助处理
+		&RolePlayAction{},       //角色扮演处理
 		&MessageAction{
 			chatgpt: chatgpt.NewGpt3(&m.config),
 		}, //消息处理
diff --git a/code/handlers/msg.go b/code/handlers/msg.go
index 49d7345..7ef227e 100644
--- a/code/handlers/msg.go
+++ b/code/handlers/msg.go
@@ -19,11 +19,13 @@ type CardKind string
 type CardChatType string
 
 var (
-	ClearCardKind     = CardKind("clear")           // 清空上下文
-	PicModeChangeKind = CardKind("pic_mode_change") // 切换图片创作模式
-	PicResolutionKind = CardKind("pic_resolution")  // 图片分辨率调整
-	PicTextMoreKind   = CardKind("pic_text_more")   // 重新根据文本生成图片
-	PicVarMoreKind    = CardKind("pic_var_more")    // 变量图片
+	ClearCardKind      = CardKind("clear")            // 清空上下文
+	PicModeChangeKind  = CardKind("pic_mode_change")  // 切换图片创作模式
+	PicResolutionKind  = CardKind("pic_resolution")   // 图片分辨率调整
+	PicTextMoreKind    = CardKind("pic_text_more")    // 重新根据文本生成图片
+	PicVarMoreKind     = CardKind("pic_var_more")     // 变量图片
+	RoleTagsChooseKind = CardKind("role_tags_choose") // 内置角色所属标签选择
+	RoleChooseKind     = CardKind("role_choose")      // 内置角色选择
 )
 
 var (
@@ -399,6 +401,59 @@ func withPicResolutionBtn(sessionID *string) larkcard.
 		Build()
 	return actions
 }
+func withRoleTagsBtn(sessionID *string, tags ...string) larkcard.
+	MessageCardElement {
+	var menuOptions []MenuOption
+
+	for _, tag := range tags {
+		menuOptions = append(menuOptions, MenuOption{
+			label: tag,
+			value: tag,
+		})
+	}
+	cancelMenu := newMenu("选择角色分类",
+		map[string]interface{}{
+			"value":     "0",
+			"kind":      RoleTagsChooseKind,
+			"sessionId": *sessionID,
+			"msgId":     *sessionID,
+		},
+		menuOptions...,
+	)
+
+	actions := larkcard.NewMessageCardAction().
+		Actions([]larkcard.MessageCardActionElement{cancelMenu}).
+		Layout(larkcard.MessageCardActionLayoutFlow.Ptr()).
+		Build()
+	return actions
+}
+
+func withRoleBtn(sessionID *string, titles ...string) larkcard.
+	MessageCardElement {
+	var menuOptions []MenuOption
+
+	for _, tag := range titles {
+		menuOptions = append(menuOptions, MenuOption{
+			label: tag,
+			value: tag,
+		})
+	}
+	cancelMenu := newMenu("查看内置角色",
+		map[string]interface{}{
+			"value":     "0",
+			"kind":      RoleChooseKind,
+			"sessionId": *sessionID,
+			"msgId":     *sessionID,
+		},
+		menuOptions...,
+	)
+
+	actions := larkcard.NewMessageCardAction().
+		Actions([]larkcard.MessageCardActionElement{cancelMenu}).
+		Layout(larkcard.MessageCardActionLayoutFlow.Ptr()).
+		Build()
+	return actions
+}
 
 func replyMsg(ctx context.Context, msg string, msgId *string) error {
 	msg, i := processMessage(msg)
@@ -590,6 +645,15 @@ func sendClearCacheCheckCard(ctx context.Context,
 	replyCard(ctx, msgId, newCard)
 }
 
+func sendSystemInstructionCard(ctx context.Context,
+	sessionId *string, msgId *string, content string) {
+	newCard, _ := newSendCard(
+		withHeader("🥷 已进入角色扮演模式", larkcard.TemplateIndigo),
+		withMainText(content),
+		withNote("请注意,这将开始一个全新的对话,您将无法利用之前话题的历史信息"))
+	replyCard(ctx, msgId, newCard)
+}
+
 func sendOnProcessCard(ctx context.Context,
 	sessionId *string, msgId *string) (*string, error) {
 	newCard, _ := newSendCardWithOutHeader(
@@ -630,7 +694,7 @@ func sendHelpCard(ctx context.Context,
 	sessionId *string, msgId *string) {
 	newCard, _ := newSendCard(
 		withHeader("🎒需要帮助吗?", larkcard.TemplateBlue),
-		withMainMd("**我是具备打字机效果的Oapi飞书聊天机器人**"),
+		withMainMd("**我是具备打字机效果的聊天机器人!**"),
 		withSplitLine(),
 		withMdAndExtraBtn(
 			"** 🆑 清除话题上下文**\n文本回复 *清除* 或 */clear*",
@@ -640,6 +704,8 @@ func sendHelpCard(ctx context.Context,
 			"chatType":  UserChatType,
 			"sessionId": *sessionId,
 		}, larkcard.MessageCardButtonTypeDanger)),
+		withMainMd("🛖 **内置角色列表** \n"+" 文本回复 *角色列表* 或 */roles*"),
+		withMainMd("🥷 **角色扮演模式**\n文本回复*角色扮演* 或 */system*+空格+角色信息"),
 		withSplitLine(),
 		withMainMd("🎒 **需要更多帮助**\n文本回复 *帮助* 或 */help*"),
 	)
@@ -678,3 +744,21 @@ func sendBalanceCard(ctx context.Context, msgId *string,
 	)
 	replyCard(ctx, msgId, newCard)
 }
+
+func SendRoleTagsCard(ctx context.Context,
+	sessionId *string, msgId *string, roleTags []string) {
+	newCard, _ := newSendCard(
+		withHeader("🛖 请选择角色类别", larkcard.TemplateIndigo),
+		withRoleTagsBtn(sessionId, roleTags...),
+		withNote("提醒:选择角色所属分类,以便我们为您推荐更多相关角色。"))
+	replyCard(ctx, msgId, newCard)
+}
+
+func SendRoleListCard(ctx context.Context,
+	sessionId *string, msgId *string, roleTag string, roleList []string) {
+	newCard, _ := newSendCard(
+		withHeader("🛖 角色列表"+" - "+roleTag, larkcard.TemplateIndigo),
+		withRoleBtn(sessionId, roleList...),
+		withNote("提醒:选择内置场景,快速进入角色扮演模式。"))
+	replyCard(ctx, msgId, newCard)
+}
diff --git a/code/initialization/config.go b/code/initialization/config.go
index 0c64d34..8e2079e 100644
--- a/code/initialization/config.go
+++ b/code/initialization/config.go
@@ -2,13 +2,12 @@ package initialization
 
 import (
 	"fmt"
+	"github.com/spf13/pflag"
 	"os"
 	"strconv"
 	"strings"
 	"sync"
 
-	"github.com/spf13/pflag"
-
 	"github.com/spf13/viper"
 )
 
@@ -86,7 +85,7 @@ func LoadConfig(cfg string) *Config {
 		UseHttps:        getViperBoolValue("USE_HTTPS", false),
 		CertFile:        getViperStringValue("CERT_FILE", "cert.pem"),
 		KeyFile:         getViperStringValue("KEY_FILE", "key.pem"),
-		OpenaiApiUrl:    getViperStringValue("API_URL", "https://oapi.czl.net"),
+		OpenaiApiUrl:    getViperStringValue("API_URL", "https://api.openai.com"),
 		HttpProxy:       getViperStringValue("HTTP_PROXY", ""),
 		AzureOn:         getViperBoolValue("AZURE_ON", false),
 		AzureApiVersion: getViperStringValue("AZURE_API_VERSION", "2023-03-15-preview"),
diff --git a/code/services/openai/gpt3.go b/code/services/openai/gpt3.go
index bdb39b8..5ea27bf 100644
--- a/code/services/openai/gpt3.go
+++ b/code/services/openai/gpt3.go
@@ -5,8 +5,29 @@ import (
 )
 
 const (
-	maxTokens   = 2000
-	temperature = 0.7
+	Fresh      AIMode = 0.1
+	Warmth     AIMode = 0.4
+	Balance    AIMode = 0.7
+	Creativity AIMode = 1.0
+)
+
+var AIModeMap = map[string]AIMode{
+	"清新": Fresh,
+	"温暖": Warmth,
+	"平衡": Balance,
+	"创意": Creativity,
+}
+
+var AIModeStrs = []string{
+	"清新",
+	"温暖",
+	"平衡",
+	"创意",
+}
+
+const (
+	maxTokens = 4096
+	engine    = "gpt-4-0613"
 )
 
 type Messages struct {
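Whether a role is picked from the card menu (CommonProcessRole) or typed directly with /system or 角色扮演 (RolePlayAction), the patch funnels both paths into the same pattern: clear the session cache, then seed it with a single system message carrying the role prompt. The sketch below only illustrates that pattern and is not code from the repository: memoryCache, roleContentByTitle, and the sample role entry are hypothetical stand-ins for services.SessionServiceCacheInterface and initialization.GetFirstRoleContentByTitle.

package main

import "fmt"

// Messages mirrors the shape of openai.Messages used in the patch.
type Messages struct {
	Role    string
	Content string
}

// memoryCache is a hypothetical stand-in for the bot's
// services.SessionServiceCacheInterface, reduced to the calls the
// role flow relies on.
type memoryCache map[string][]Messages

func (c memoryCache) Clear(sessionID string)                   { delete(c, sessionID) }
func (c memoryCache) SetMsg(sessionID string, msgs []Messages) { c[sessionID] = msgs }
func (c memoryCache) GetMsg(sessionID string) []Messages       { return c[sessionID] }

// roleContentByTitle stands in for initialization.GetFirstRoleContentByTitle;
// the single entry is made up for the example.
var roleContentByTitle = map[string]string{
	"Interviewer": "You are a senior technical interviewer. Ask one question at a time.",
}

// chooseRole replays what CommonProcessRole does after a role card is
// clicked: wipe the old conversation, then seed a fresh one with a single
// system message holding the role prompt.
func chooseRole(cache memoryCache, sessionID, title string) error {
	content, ok := roleContentByTitle[title]
	if !ok {
		return fmt.Errorf("no built-in role titled %q", title)
	}
	cache.Clear(sessionID)
	cache.SetMsg(sessionID, []Messages{{Role: "system", Content: content}})
	return nil
}

func main() {
	cache := memoryCache{}
	if err := chooseRole(cache, "session-1", "Interviewer"); err != nil {
		fmt.Println(err)
		return
	}
	// Later user turns are appended after the seeded system message.
	msgs := append(cache.GetMsg("session-1"), Messages{Role: "user", Content: "I'm ready, please start."})
	cache.SetMsg("session-1", msgs)
	for _, m := range msgs {
		fmt.Printf("%s: %s\n", m.Role, m.Content)
	}
}

Because the seed replaces whatever was cached before, later turns build on the role prompt alone, which is what the card's note about losing the previous topic's history warns about.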