Support including silent_search in the model name to search silently without outputting the search process

Vinlic 2024-03-20 01:37:25 +08:00
parent b8134a64a5
commit 909796bd91
4 changed files with 31 additions and 18 deletions

View File

@@ -201,6 +201,8 @@ Authorization: Bearer [refresh_token]
 Request data:
 ```json
 {
+    // The model name can be anything; if you do not want the search process in the output, include silent_search in the model name
+    "model": "kimi",
     "messages": [
         {
             "role": "user",
@@ -254,6 +256,8 @@ Authorization: Bearer [refresh_token]
 Request data:
 ```json
 {
+    // The model name can be anything; if you do not want the search process in the output, include silent_search in the model name
+    "model": "kimi",
     "messages": [
         {
             "role": "user",
@@ -318,6 +322,8 @@ Authorization: Bearer [refresh_token]
 Request data:
 ```json
 {
+    // The model name can be anything; if you do not want the search process in the output, include silent_search in the model name
+    "model": "kimi",
     "messages": [
         {
             "role": "user",

View File

@@ -1,6 +1,6 @@
 {
   "name": "kimi-free-api",
-  "version": "0.0.15",
+  "version": "0.0.16",
   "description": "Kimi Free API Server",
   "type": "module",
   "main": "dist/index.js",

View File

@@ -164,12 +164,13 @@ async function removeConversation(convId: string, refreshToken: string) {
 /**
  *
  *
+ * @param model
  * @param messages messages in the GPT-series format
  * @param refreshToken refresh_token for the access_token
  * @param useSearch
  * @param retryCount
  */
-async function createCompletion(messages: any[], refreshToken: string, useSearch = true, retryCount = 0) {
+async function createCompletion(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, retryCount = 0) {
     return (async () => {
         logger.info(messages);
@@ -204,7 +205,7 @@ async function createCompletion(messages: any[], refreshToken: string, useSearch
         const streamStartTime = util.timestamp();
         // Receive the stream as output text
-        const answer = await receiveStream(convId, result.data);
+        const answer = await receiveStream(model, convId, result.data);
         logger.success(`Stream has completed transfer ${util.timestamp() - streamStartTime}ms`);
         // Asynchronously remove the conversation; if the messages are non-compliant this may throw a database error, which can be ignored
@@ -219,7 +220,7 @@ async function createCompletion(messages: any[], refreshToken: string, useSearch
             logger.warn(`Try again after ${RETRY_DELAY / 1000}s...`);
             return (async () => {
                 await new Promise(resolve => setTimeout(resolve, RETRY_DELAY));
-                return createCompletion(messages, refreshToken, useSearch, retryCount + 1);
+                return createCompletion(model, messages, refreshToken, useSearch, retryCount + 1);
             })();
         }
         throw err;
@@ -229,12 +230,13 @@ async function createCompletion(messages: any[], refreshToken: string, useSearch
 /**
  *
  *
+ * @param model
  * @param messages messages in the GPT-series format
  * @param refreshToken refresh_token for the access_token
  * @param useSearch
  * @param retryCount
  */
-async function createCompletionStream(messages: any[], refreshToken: string, useSearch = true, retryCount = 0) {
+async function createCompletionStream(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, retryCount = 0) {
     return (async () => {
         logger.info(messages);
@@ -268,7 +270,7 @@ async function createCompletionStream(messages: any[], refreshToken: string, use
         });
         const streamStartTime = util.timestamp();
         // Create a transform stream to convert the message format into a GPT-compatible format
-        return createTransStream(convId, result.data, () => {
+        return createTransStream(model, convId, result.data, () => {
             logger.success(`Stream has completed transfer ${util.timestamp() - streamStartTime}ms`);
             // After the stream ends, asynchronously remove the conversation; if the messages are non-compliant this may throw a database error, which can be ignored
             removeConversation(convId, refreshToken)
@@ -281,7 +283,7 @@ async function createCompletionStream(messages: any[], refreshToken: string, use
             logger.warn(`Try again after ${RETRY_DELAY / 1000}s...`);
             return (async () => {
                 await new Promise(resolve => setTimeout(resolve, RETRY_DELAY));
-                return createCompletionStream(messages, refreshToken, useSearch, retryCount + 1);
+                return createCompletionStream(model, messages, refreshToken, useSearch, retryCount + 1);
             })();
         }
         throw err;
@@ -541,15 +543,16 @@ function checkResult(result: AxiosResponse, refreshToken: string) {
 /**
  *
  *
+ * @param model
  * @param convId conversation ID
  * @param stream
  */
-async function receiveStream(convId: string, stream: any) {
+async function receiveStream(model: string, convId: string, stream: any) {
     return new Promise((resolve, reject) => {
         // Initialize the message
         const data = {
             id: convId,
-            model: MODEL_NAME,
+            model,
             object: 'chat.completion',
             choices: [
                 { index: 0, message: { role: 'assistant', content: '' }, finish_reason: 'stop' }
@@ -558,6 +561,7 @@ async function receiveStream(convId: string, stream: any) {
             created: util.unixTimestamp()
         };
         let refContent = '';
+        const silentSearch = model.indexOf('silent_search') != -1;
         const parser = createParser(event => {
             try {
                 if (event.type !== "event") return;
@@ -576,7 +580,7 @@ async function receiveStream(convId: string, stream: any) {
                     resolve(data);
                 }
                 // Handle web search
-                else if (result.event == 'search_plus' && result.msg && result.msg.type == 'get_res')
+                else if (!silentSearch && result.event == 'search_plus' && result.msg && result.msg.type == 'get_res')
                     refContent += `${result.msg.title}(${result.msg.url})\n`;
                 // else
                 //     logger.warn(result.event, result);
@@ -598,19 +602,21 @@ async function receiveStream(convId: string, stream: any) {
  *
  * Converts the message stream into a GPT-compatible stream format
  *
+ * @param model
  * @param convId conversation ID
  * @param stream
  * @param endCallback
  */
-function createTransStream(convId: string, stream: any, endCallback?: Function) {
+function createTransStream(model: string, convId: string, stream: any, endCallback?: Function) {
     // Message creation time
     const created = util.unixTimestamp();
     // Create the transform stream
     const transStream = new PassThrough();
     let searchFlag = false;
+    const silentSearch = model.indexOf('silent_search') != -1;
     !transStream.closed && transStream.write(`data: ${JSON.stringify({
         id: convId,
-        model: MODEL_NAME,
+        model,
         object: 'chat.completion.chunk',
         choices: [
             { index: 0, delta: { role: 'assistant', content: '' }, finish_reason: null }
@@ -628,7 +634,7 @@ function createTransStream(convId: string, stream: any, endCallback?: Function)
             if (result.event == 'cmpl') {
                 const data = `data: ${JSON.stringify({
                     id: convId,
-                    model: MODEL_NAME,
+                    model,
                     object: 'chat.completion.chunk',
                     choices: [
                         { index: 0, delta: { content: (searchFlag ? '\n' : '') + result.text }, finish_reason: null }
@@ -643,7 +649,7 @@ function createTransStream(convId: string, stream: any, endCallback?: Function)
             else if (result.event == 'all_done' || result.event == 'error') {
                 const data = `data: ${JSON.stringify({
                     id: convId,
-                    model: MODEL_NAME,
+                    model,
                     object: 'chat.completion.chunk',
                     choices: [
                         {
@@ -660,12 +666,12 @@ function createTransStream(convId: string, stream: any, endCallback?: Function)
                 endCallback && endCallback();
             }
             // Handle web search
-            else if (result.event == 'search_plus' && result.msg && result.msg.type == 'get_res') {
+            else if (!silentSearch && result.event == 'search_plus' && result.msg && result.msg.type == 'get_res') {
                 if (!searchFlag)
                     searchFlag = true;
                 const data = `data: ${JSON.stringify({
                     id: convId,
-                    model: MODEL_NAME,
+                    model,
                     object: 'chat.completion.chunk',
                     choices: [
                         {
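
The controller changes above all serve one flow: the request's model name is threaded down to receiveStream and createTransStream, each computes a silentSearch flag from it, and search_plus events are skipped when the flag is set. A standalone sketch of that filtering decision; the event shape and the helper name are illustrative, not project code:

```ts
// Illustrative sketch of the silent_search filtering decision used above.
// SearchPlusEvent and shouldEmitSearchResult are assumptions for this example.
interface SearchPlusEvent {
    event: string;
    msg?: { type: string; title: string; url: string };
}

function shouldEmitSearchResult(model: string, result: SearchPlusEvent): boolean {
    // A model name containing "silent_search" suppresses the search process
    const silentSearch = model.indexOf('silent_search') != -1;
    return !silentSearch
        && result.event == 'search_plus'
        && !!result.msg
        && result.msg.type == 'get_res';
}

const event: SearchPlusEvent = {
    event: 'search_plus',
    msg: { type: 'get_res', title: 'Example result', url: 'https://example.com' }
};
console.log(shouldEmitSearchResult('kimi', event));               // true: reference is emitted
console.log(shouldEmitSearchResult('kimi-silent_search', event)); // false: reference is suppressed
```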

View File

@@ -19,15 +19,16 @@ export default {
             const tokens = chat.tokenSplit(request.headers.authorization);
             // Randomly pick one refresh_token
             const token = _.sample(tokens);
+            const model = request.body.model;
             const messages = request.body.messages;
             if (request.body.stream) {
-                const stream = await chat.createCompletionStream(request.body.messages, token, request.body.use_search);
+                const stream = await chat.createCompletionStream(model, messages, token, request.body.use_search);
                 return new Response(stream, {
                     type: "text/event-stream"
                 });
             }
             else
-                return await chat.createCompletion(messages, token, request.body.use_search);
+                return await chat.createCompletion(model, messages, token, request.body.use_search);
         }
     }
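
With the route now forwarding request.body.model, the controller functions take the model name as their first argument. A hedged usage sketch; the import path and the token value are placeholders, not taken from this commit:

```ts
// Hypothetical direct use of the updated controller functions (parameter order as
// introduced in this commit: model, messages, refreshToken, useSearch, retryCount).
// The import path is an assumption; adjust it to wherever the chat controller lives.
import chat from './controllers/chat';

const token = '[refresh_token]';  // placeholder refresh_token
const messages = [{ role: 'user', content: 'What is in the news today?' }];

// Non-streaming completion with the search process suppressed
const answer = await chat.createCompletion('kimi-silent_search', messages, token);

// Streaming completion that still emits search references as they arrive
const stream = await chat.createCompletionStream('kimi', messages, token);
```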