Compare commits


115 Commits

Author SHA1 Message Date
Vinlic科技
3b8bf35c65
Update README.md 2024-12-30 11:53:44 +08:00
Vinlic科技
d43ce6c41d
Update README.md 2024-12-18 23:01:57 +08:00
Vinlic科技
856c1420b2
Update README.md 2024-12-18 10:10:33 +08:00
Vinlic科技
9f58f1858f
Update README.md 2024-12-17 17:59:40 +08:00
Vinlic
1375e7c6c4 Support the exploration version and K1 model; fix image parsing 2024-12-17 17:08:09 +08:00
Vinlic科技
1dd7f89cbc
Update README.md 2024-12-14 02:06:35 +08:00
Vinlic科技
cc198adfbd
Merge pull request #149 from Alex-Yanggg/patch-2
Update README_EN.md
2024-12-12 11:23:24 +08:00
Vinlic科技
06119ed0a1
Merge pull request #150 from Alex-Yanggg/patch-1
Update README.md
2024-12-12 11:23:03 +08:00
Alex
e1a68de548
Update README_EN.md
Fix table-of-contents hierarchy
2024-12-12 14:20:50 +11:00
Alex
22b3a4b3b7
Update README.md
Fix table-of-contents hierarchy
2024-12-12 14:20:11 +11:00
Vinlic科技
c8bb4e64ee
Merge pull request #148 from Alex-Yanggg/master
Update Readme for English and Chinese
2024-12-12 10:49:07 +08:00
Alex
3e8efee623
Update README.md
Update README.md
2024-12-12 13:45:02 +11:00
Alex
63e6189c63
Update README_EN.md
24-12-12
2024-12-12 13:37:41 +11:00
Vinlic科技
f133a9bc01
Update README.md 2024-12-04 17:04:17 +08:00
Vinlic科技
7bc0585352
Update README.md 2024-12-04 17:03:59 +08:00
Vinlic科技
38d59e20fc
Merge pull request #141 from Yanyutin753/优化web搜索输出
🦄 Improve web search output
2024-10-27 21:12:00 +08:00
Clivia
bc37f56d1f 🦄 Improve web search output
🦄 Improve web search output
2024-10-27 20:13:20 +08:00
Vinlic
b2ae0554ee Release 0.0.34 2024-07-12 11:02:43 +08:00
Vinlic
399de41b61 Add newly introduced request fields 2024-07-12 11:02:36 +08:00
Vinlic科技
44316372a5
Update README.md 2024-05-15 13:35:10 +08:00
Vinlic科技
fcc2c9f795
Update README.md 2024-05-10 15:52:03 +08:00
Vinlic科技
a626a78553
Update README.md 2024-05-10 15:51:38 +08:00
Vinlic科技
5f2eb803aa
Update README.md 2024-05-04 17:06:02 +08:00
Vinlic科技
684aedae1c
Update README.md 2024-05-04 17:05:50 +08:00
Vinlic
984d724367 Release 0.0.33 2024-04-30 11:54:17 +08:00
Vinlic
889c874264 Support kimi+ agent invocation 2024-04-30 11:53:51 +08:00
Vinlic
6105410dd2 Release 0.0.32 2024-04-28 17:54:44 +08:00
Vinlic
01ff5c250a Handle conversation-merging issue caused by sending files in the first round 2024-04-28 17:54:31 +08:00
Vinlic
82a8359634 Release 0.0.31 2024-04-28 14:16:27 +08:00
Vinlic
7275ab7e11 Merge branch 'master' of https://github.com/LLM-Red-Team/kimi-free-api 2024-04-28 14:14:28 +08:00
Vinlic
d862808226 update README 2024-04-28 14:14:13 +08:00
Vinlic
7cc6033201 Support native multi-turn conversation 2024-04-28 14:11:16 +08:00
Vinlic科技
8f72c5de78
Merge pull request #89 from KPCOFGS/master
Updated the English README_EN.md
2024-04-28 03:29:25 +08:00
Shixian Sheng
72df4e1fc1
Update README_EN.md 2024-04-27 12:18:13 -04:00
Vinlic科技
9b00be5883
Update README.md 2024-04-26 16:49:17 +08:00
Vinlic科技
61cc3a4655
Update README.md 2024-04-26 16:48:30 +08:00
Vinlic科技
1aa45264f1
Merge pull request #86 from Yanyutin753/main
Create sync.yml to update code
2024-04-25 15:17:58 +08:00
Yanyutin753
56caa486c8 Create sync.yml to update code 2024-04-25 15:14:06 +08:00
Vinlic
2aa6465a36 Release 0.0.30 2024-04-25 10:49:26 +08:00
Vinlic
09250f208a Merge branch 'master' of https://github.com/LLM-Red-Team/kimi-free-api 2024-04-25 10:49:17 +08:00
Vinlic
a2d5ab9390 Fix failure to upload and process some large files 2024-04-25 10:47:57 +08:00
Vinlic科技
fe584180b1
Merge pull request #84 from KPCOFGS/master
Updated the Chinese and English README.md files
2024-04-25 08:59:42 +08:00
Shi Sheng
c1c601b498
Update README_EN.md 2024-04-24 20:51:11 -04:00
Shi Sheng
b9caca3289
Update README.md 2024-04-24 15:41:33 -04:00
Shi Sheng
2b32fc66f4
Update README_EN.md 2024-04-24 15:36:38 -04:00
Shi Sheng
bffd5a24a3
Update README_EN.md 2024-04-24 15:22:32 -04:00
Vinlic科技
95f8c4e3e3
Merge pull request #83 from KPCOFGS/master
Updated the Chinese and English README files
2024-04-24 19:53:15 +08:00
Shi Sheng
0632d8111e
Update README.md 2024-04-24 07:48:16 -04:00
Shi Sheng
f1aa2e822c
Update README_EN.md 2024-04-24 07:48:08 -04:00
Vinlic科技
53436b5f21
Update README.md 2024-04-24 14:33:24 +08:00
Vinlic科技
e8284288c9
Merge pull request #81 from Yanyutin753/tem-main
feat support /v1/models to be better use lobechat
2024-04-24 13:34:13 +08:00
Clivia
04db70bec5
Merge branch 'LLM-Red-Team:master' into tem-main 2024-04-24 13:31:23 +08:00
Yanyutin753
f7c1fa7be3 feat support /v1/models to be better use lobechat 2024-04-24 13:30:46 +08:00
Vinlic科技
b9d479b9f6
Merge pull request #80 from KPCOFGS/master
Updated the Chinese and English README files
2024-04-24 13:29:41 +08:00
Shi Sheng
c9c26fdd31
Update README_EN.md 2024-04-23 08:26:15 -04:00
Shi Sheng
43e14b6e3e
Update README.md 2024-04-23 08:22:38 -04:00
Shi Sheng
65a3fed83b
Update README_EN.md 2024-04-23 08:20:52 -04:00
Shi Sheng
4a225853af
Update README.md 2024-04-23 08:20:28 -04:00
Shi Sheng
6b343f4094
Update README_EN.md 2024-04-23 08:17:49 -04:00
Shi Sheng
e8c6622e83
Update README_EN.md 2024-04-23 08:15:43 -04:00
Shi Sheng
ae6dc4a79f
Update README_EN.md 2024-04-23 08:09:42 -04:00
Shi Sheng
bdb8ced5ce
Update README.md 2024-04-23 08:08:49 -04:00
Shi Sheng
a0c1bba3c9
Update README.md 2024-04-23 08:08:00 -04:00
Shi Sheng
c6da81a53e
Update README.md 2024-04-23 08:06:25 -04:00
Vinlic科技
77d42d9484
Update README.md 2024-04-22 16:43:00 +08:00
Vinlic科技
d73a9bc95d
Merge pull request #72 from XunjunYin/master
Update: README.md typo
2024-04-20 16:51:35 +08:00
Xunjun Yin
65f45697e8
Update: README.md typo 2024-04-20 16:34:55 +08:00
Vinlic
875bb55f21 Patch some request fingerprint details 2024-04-19 16:51:55 +08:00
Vinlic
cbf215d8a8 Merge branch 'master' of https://github.com/LLM-Red-Team/kimi-free-api 2024-04-17 12:24:11 +08:00
Vinlic
7c3bc3c0d8 Release 0.0.27 2024-04-17 12:18:41 +08:00
Vinlic
ae8e8316e4 Improve display of search citation links to avoid URL parsing errors 2024-04-17 12:18:17 +08:00
Vinlic科技
e1b7e55e70
Update README.md 2024-04-17 01:10:10 +08:00
Vinlic科技
e1710ee95a
Update README.md 2024-04-17 01:00:05 +08:00
Vinlic
d14d062078 Release 0.0.26 2024-04-13 02:14:48 +08:00
Vinlic
1a3327cc8d Fix inability to re-trigger web search in multi-turn conversations 2024-04-13 02:14:28 +08:00
Vinlic科技
cfec318bd0
Merge pull request #56 from MichaelYuhe/master
docs: add deploy to Zeabur guide
2024-04-12 15:24:08 +08:00
Yuhang
1d18ac3f6b
add deploy to Zeabur in Readme_en 2024-04-12 15:11:31 +08:00
Yuhang
b52e84bda0
add deploy to Zeabur in Readme 2024-04-12 15:10:35 +08:00
Vinlic
ee7cb9fdff Merge branch 'master' of https://github.com/Vinlic/kimi-free-api 2024-04-12 13:17:46 +08:00
Vinlic
a12a967202 update README 2024-04-12 13:17:23 +08:00
Vinlic科技
bff5623f73
update README 2024-04-11 18:53:03 +08:00
Vinlic
2d2454b65b update README 2024-04-11 15:03:04 +08:00
Vinlic
4642939835 update README 2024-04-11 14:28:32 +08:00
Vinlic
87593a270a Add Render deployment 2024-04-11 14:28:16 +08:00
Vinlic
ce89c29b05 Add Render deployment 2024-04-11 14:27:27 +08:00
Vinlic
3bb36fbbf0 Merge branch 'master' of https://github.com/Vinlic/kimi-free-api 2024-04-11 13:55:22 +08:00
Vinlic
3b3584bf4f Release 0.0.25 2024-04-11 13:54:50 +08:00
Vinlic
d1e0fcad2b Support Vercel deployment 2024-04-11 13:54:34 +08:00
Vinlic
674647e108 Switch from npm to yarn to speed up container builds 2024-04-11 13:54:16 +08:00
Vinlic科技
e244052c6a
Merge pull request #54 from khazic/master
Newly added
2024-04-11 10:32:52 +08:00
khazic
6ced4e76d2 Newly added 2024-04-11 10:25:54 +08:00
Vinlic科技
4a1d39bdd8
Merge pull request #53 from khazic/master
README entry
2024-04-11 10:19:19 +08:00
khazic
2cc8c2e13d README entry 2024-04-11 10:15:05 +08:00
Vinlic科技
1ab9e980cf
Merge pull request #52 from khazic/master
Wrote README_EN
2024-04-11 10:13:20 +08:00
khazic
97cc86f718 Wrote README_EN 2024-04-11 10:09:13 +08:00
Vinlic
d4f6fee14d update README 2024-04-10 18:31:52 +08:00
Vinlic
e157e40525 Add refresh_token liveness check 2024-04-10 18:22:00 +08:00
Vinlic
d08a4b2130 Release 0.0.24 2024-04-10 17:57:16 +08:00
Vinlic
31298c9566 update Dockerfile 2024-04-10 17:56:40 +08:00
Vinlic
fe63c20198 Release 0.0.23 2024-04-09 10:47:40 +08:00
Vinlic
72e29e4168 Add log warning for incorrect request URLs 2024-04-09 10:47:28 +08:00
Vinlic
9fd7ae890b Do not inject the attention prompt in the first round 2024-04-08 22:26:05 +08:00
Vinlic
f5bea5ea68 Release 0.0.22 2024-04-08 22:24:13 +08:00
Vinlic
0b2c8434c9 Do not inject the attention prompt in the first round 2024-04-08 22:23:54 +08:00
Vinlic
520f26f72f Release 0.0.21 2024-04-06 00:16:18 +08:00
Vinlic科技
462c64656e
Merge pull request #42 from Yanyutin753/master
optimize code in messagesPrepare
2024-04-06 00:09:24 +08:00
Yanyutin753
cda36ed4fc fix the position of "\n" 2024-04-05 19:12:47 +08:00
Yanyutin753
70ea39591b optimize code in messagesPrepare 2024-04-05 18:54:04 +08:00
Vinlic
11a145924f Increase file upload timeout 2024-04-05 01:16:05 +08:00
Vinlic
1b2b7927ee Release 0.0.20 2024-04-03 00:00:46 +08:00
Vinlic
66cddd522b Adjust log output and the attention-injection prompt 2024-04-02 23:27:38 +08:00
Vinlic科技
ff59201961
Merge pull request #38 from Yanyutin753/transfer
Reduce context confusion when sending files
2024-04-02 23:17:13 +08:00
Yanyutin753
6853087757 Reduce context confusion when sending files 2024-04-02 23:13:00 +08:00
Yanyutin753
1e09d807e6 Log uploaded messages 2024-04-02 21:15:36 +08:00
Yanyutin753
66067b4dd9 Improve file-upload context handling by adding a prompt 2024-04-02 20:54:46 +08:00
21 changed files with 2943 additions and 219 deletions

48
.github/workflows/sync.yml vendored Normal file

@ -0,0 +1,48 @@
name: Upstream Sync
permissions:
  contents: write
  issues: write
  actions: write

on:
  schedule:
    - cron: '0 * * * *' # every hour
  workflow_dispatch:

jobs:
  sync_latest_from_upstream:
    name: Sync latest commits from upstream repo
    runs-on: ubuntu-latest
    if: ${{ github.event.repository.fork }}

    steps:
      - uses: actions/checkout@v4

      - name: Clean issue notice
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'close-issues'
          labels: '🚨 Sync Fail'

      - name: Sync upstream changes
        id: sync
        uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
        with:
          upstream_sync_repo: LLM-Red-Team/kimi-free-api
          upstream_sync_branch: master
          target_sync_branch: master
          target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set
          test_mode: false

      - name: Sync check
        if: failure()
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'create-issue'
          title: '🚨 同步失败 | Sync Fail'
          labels: '🚨 Sync Fail'
          body: |
            Due to a change in the workflow file of the LLM-Red-Team/kimi-free-api upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. Please refer to the detailed [Tutorial][tutorial-en-US] for instructions.
            由于 LLM-Red-Team/kimi-free-api 上游仓库的 workflow 文件变更,导致 GitHub 自动暂停了本次自动更新,你需要手动 Sync Fork 一次,

3
.gitignore vendored

@@ -1,3 +1,4 @@
dist/
node_modules/
logs/
+.vercel

Dockerfile

@@ -4,14 +4,15 @@ WORKDIR /app
COPY . /app
-RUN npm i --registry http://registry.npmmirror.com && npm run build
+RUN yarn install --registry https://registry.npmmirror.com/ && yarn run build
FROM node:lts-alpine
-COPY --from=BUILD_IMAGE /app/configs ./configs
-COPY --from=BUILD_IMAGE /app/package.json ./package.json
-COPY --from=BUILD_IMAGE /app/dist ./dist
-COPY --from=BUILD_IMAGE /app/node_modules ./node_modules
+COPY --from=BUILD_IMAGE /app/public /app/public
+COPY --from=BUILD_IMAGE /app/configs /app/configs
+COPY --from=BUILD_IMAGE /app/package.json /app/package.json
+COPY --from=BUILD_IMAGE /app/dist /app/dist
+COPY --from=BUILD_IMAGE /app/node_modules /app/node_modules
WORKDIR /app

161
README.md

@@ -1,43 +1,69 @@
# KIMI AI Free 服务
-![](https://img.shields.io/github/license/llm-red-team/kimi-free-api.svg)
+<hr>
+<span>[ 中文 | <a href="README_EN.md">English</a> ]</span>
+[![](https://img.shields.io/github/license/llm-red-team/kimi-free-api.svg)](LICENSE)
![](https://img.shields.io/github/stars/llm-red-team/kimi-free-api.svg)
![](https://img.shields.io/github/forks/llm-red-team/kimi-free-api.svg)
![](https://img.shields.io/docker/pulls/vinlic/kimi-free-api.svg)
-支持高速流式输出、支持多轮对话、支持联网搜索、支持长文档解读、支持图像解析零配置部署多路token支持自动清理会话痕迹。
+支持高速流式输出、支持多轮对话、支持联网搜索、支持智能体对话、支持探索版、支持K1思考模型、支持长文档解读、支持图像解析零配置部署多路token支持自动清理会话痕迹。
与ChatGPT接口完全兼容。
还有以下个free-api欢迎关注
阶跃星辰 (跃问StepChat) 接口转API [step-free-api](https://github.com/LLM-Red-Team/step-free-api)
阿里通义 (Qwen) 接口转API [qwen-free-api](https://github.com/LLM-Red-Team/qwen-free-api)
-ZhipuAI (智谱清言) 接口转API [glm-free-api](https://github.com/LLM-Red-Team/glm-free-api)
+智谱AI (智谱清言) 接口转API [glm-free-api](https://github.com/LLM-Red-Team/glm-free-api)
-聆心智能 (Emohaa) 接口转API [emohaa-free-api](https://github.com/LLM-Red-Team/emohaa-free-api)
+秘塔AI (Metaso) 接口转API [metaso-free-api](https://github.com/LLM-Red-Team/metaso-free-api)
字节跳动豆包接口转API [doubao-free-api](https://github.com/LLM-Red-Team/doubao-free-api)
字节跳动即梦AI接口转API [jimeng-free-api](https://github.com/LLM-Red-Team/jimeng-free-api)
讯飞星火Spark接口转API [spark-free-api](https://github.com/LLM-Red-Team/spark-free-api)
MiniMax海螺AI接口转API [hailuo-free-api](https://github.com/LLM-Red-Team/hailuo-free-api)
深度求索DeepSeek接口转API [deepseek-free-api](https://github.com/LLM-Red-Team/deepseek-free-api)
聆心智能 (Emohaa) 接口转API [emohaa-free-api](https://github.com/LLM-Red-Team/emohaa-free-api)(当前不可用)
## 目录
* [免责声明](#免责声明)
* [在线体验](#在线体验)
* [效果示例](#效果示例)
* [接入准备](#接入准备)
* [多账号接入](#多账号接入)
* [Docker部署](#Docker部署)
* [Docker-compose部署](#Docker-compose部署)
* [Render部署](#Render部署)
* [Vercel部署](#Vercel部署)
* [Zeabur部署](#Zeabur部署)
* [原生部署](#原生部署)
* [推荐使用客户端](#推荐使用客户端)
* [接口列表](#接口列表)
* [对话补全](#对话补全)
* [文档解读](#文档解读)
* [图像解析](#图像解析)
* [refresh_token存活检测](#refresh_token存活检测)
* [注意事项](#注意事项)
* [Nginx反代优化](#Nginx反代优化)
* [Token统计](#Token统计)
* [Star History](#star-history)
## 免责声明
**逆向API是不稳定的建议前往MoonshotAI官方 https://platform.moonshot.cn/ 付费使用API避免封禁的风险。**
**本组织和个人不接受任何资金捐助和交易,此项目是纯粹研究交流学习性质!**
**仅限自用,禁止对外提供服务或商用,避免对官方造成服务压力,否则风险自担!**
@@ -46,31 +72,31 @@ ZhipuAI (智谱清言) 接口转API [glm-free-api](https://github.com/LLM-Red-Te
**仅限自用,禁止对外提供服务或商用,避免对官方造成服务压力,否则风险自担!**
## 在线体验
此链接仅临时测试功能,不可长期使用,长期使用请自行部署。
https://udify.app/chat/Po0F6BMJ15q5vu2P
## 效果示例
-### 验明正身
+### 验明正身Demo
![验明正身](./doc/example-1.png)
-### 多轮对话
+### 多轮对话Demo
![多轮对话](./doc/example-6.png)
-### 联网搜索
+### 联网搜索Demo
![联网搜索](./doc/example-2.png)
-### 长文档解读
+### 智能体对话Demo
此处使用 [翻译通](https://kimi.moonshot.cn/chat/coo6l3pkqq4ri39f36bg) 智能体。
![智能体对话](./doc/example-7.png)
### 长文档解读Demo
![长文档解读](./doc/example-5.png)
-### 图像解析
+### 图像OCR Demo
![图像解析](./doc/example-3.png)
@@ -100,7 +126,7 @@ https://udify.app/chat/Po0F6BMJ15q5vu2P
## Docker部署
-请准备一台具有公网IP的服务器并将8000端口开放。
+请准备能够部署Docker镜像且能够访问网络的设备或服务器并将8000端口开放。
拉取镜像并启动服务
@@ -142,6 +168,39 @@ services:
- TZ=Asia/Shanghai
```
### Render部署
**注意部分部署区域可能无法连接kimi如容器日志出现请求超时或无法连接新加坡实测不可用请切换其他区域部署**
**注意免费账户的容器实例将在一段时间不活动时自动停止运行这会导致下次请求时遇到50秒或更长的延迟建议查看[Render容器保活](https://github.com/LLM-Red-Team/free-api-hub/#Render%E5%AE%B9%E5%99%A8%E4%BF%9D%E6%B4%BB)**
1. fork本项目到你的github账号下。
2. 访问 [Render](https://dashboard.render.com/) 并登录你的github账号。
3. 构建你的 Web ServiceNew+ -> Build and deploy from a Git repository -> Connect你fork的项目 -> 选择部署区域 -> 选择实例类型为Free -> Create Web Service
4. 等待构建完成后复制分配的域名并拼接URL访问即可。
### Vercel部署
**注意Vercel免费账户的请求响应超时时间为10秒但接口响应通常较久可能会遇到Vercel返回的504超时错误**
请先确保安装了Node.js环境。
```shell
npm i -g vercel --registry http://registry.npmmirror.com
vercel login
git clone https://github.com/LLM-Red-Team/kimi-free-api
cd kimi-free-api
vercel --prod
```
### Zeabur部署
**注意:免费账户的容器实例可能无法稳定运行**
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/GRFYBP)
## 原生部署
请准备一台具有公网IP的服务器并将8000端口开放。
@@ -190,6 +249,14 @@ pm2 reload kimi-free-api
pm2 stop kimi-free-api
```
## 推荐使用客户端
使用以下二次开发客户端接入free-api系列项目更快更简单支持文档/图像上传!
由 [Clivia](https://github.com/Yanyutin753/lobe-chat) 二次开发的LobeChat [https://github.com/Yanyutin753/lobe-chat](https://github.com/Yanyutin753/lobe-chat)
由 [时光@](https://github.com/SuYxh) 二次开发的ChatGPT Web [https://github.com/SuYxh/chatgpt-web-sea](https://github.com/SuYxh/chatgpt-web-sea)
## 接口列表
目前支持与openai兼容的 `/v1/chat/completions` 接口可自行使用与openai或其他兼容的客户端接入接口或者使用 [dify](https://dify.ai/) 等线上服务接入使用。
@@ -209,8 +276,19 @@ Authorization: Bearer [refresh_token]
请求数据:
```json
{
-// 模型名称随意填写如果不希望输出检索过程模型名称请包含silent_search
+// 模型名称
// kimi默认模型
// kimi-search联网检索模型
// kimi-research探索版模型
// kimi-k1K1模型
// kimi-math数学模型
// kimi-silent不输出检索过程模型
// search/research/k1/math/silent可自由组合使用
// 如果使用kimi+智能体model请填写智能体ID就是浏览器地址栏上尾部的一串英文+数字20个字符的ID
"model": "kimi",
// 目前多轮对话基于消息合并实现某些场景可能导致能力下降且受单轮最大Token数限制
// 如果您想获得原生的多轮对话体验可以传入首轮消息获得的id来接续上下文注意如果使用这个首轮必须传none否则第二轮会空响应
// "conversation_id": "cnndivilnl96vah411dg",
"messages": [
{
"role": "user",
@ -227,6 +305,7 @@ Authorization: Bearer [refresh_token]
响应数据:
```json
{
// 如果想获得原生多轮对话体验此id你可以传入到下一轮对话的conversation_id来接续上下文
"id": "cnndivilnl96vah411dg",
"model": "kimi",
"object": "chat.completion",
@ -264,7 +343,15 @@ Authorization: Bearer [refresh_token]
请求数据:
```json
{
-// 模型名称随意填写如果不希望输出检索过程模型名称请包含silent_search
+// 模型名称
// kimi默认模型
// kimi-search联网检索模型
// kimi-research探索版模型
// kimi-k1K1模型
// kimi-math数学模型
// kimi-silent不输出检索过程模型
// search/research/k1/math/silent可自由组合使用
// 如果使用kimi+智能体model请填写智能体ID就是浏览器地址栏上尾部的一串英文+数字20个字符的ID
"model": "kimi",
"messages": [
{
@@ -313,7 +400,7 @@ Authorization: Bearer [refresh_token]
}
```
-### 图像解析
+### 图像OCR
提供一个可访问的图像URL或者BASE64_URL进行解析。
@ -330,7 +417,15 @@ Authorization: Bearer [refresh_token]
请求数据:
```json
{
-// 模型名称随意填写如果不希望输出检索过程模型名称请包含silent_search
+// 模型名称
// kimi默认模型
// kimi-search联网检索模型
// kimi-research探索版模型
// kimi-k1K1模型
// kimi-math数学模型
// kimi-silent不输出检索过程模型
// search/research/k1/math/silent可自由组合使用
// 如果使用kimi+智能体model请填写智能体ID就是浏览器地址栏上尾部的一串英文+数字20个字符的ID
"model": "kimi",
"messages": [
{
@@ -379,6 +474,26 @@ Authorization: Bearer [refresh_token]
}
```
### refresh_token存活检测
检测refresh_token是否存活如果存活live为true否则为false请不要频繁小于10分钟调用此接口。
**POST /token/check**
请求数据:
```json
{
"token": "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9..."
}
```
响应数据:
```json
{
"live": true
}
```
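To round off the interface docs above with a worked illustration: the 对话补全 request now accepts a `conversation_id` field, and the `id` returned in the response can be fed back on later requests to continue the same Kimi conversation natively (per the comment above, pass `none` on the first round). A minimal TypeScript sketch; the base URL and token are placeholders for your own deployment:

```typescript
// Sketch: native multi-turn chat via conversation_id, following the comments in the
// 对话补全 request example above. BASE_URL and REFRESH_TOKEN are placeholders.
const BASE_URL = "http://127.0.0.1:8000";
const REFRESH_TOKEN = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9...";

async function chat(content: string, conversationId = "none") {
  const res = await fetch(`${BASE_URL}/v1/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${REFRESH_TOKEN}`,
    },
    body: JSON.stringify({
      model: "kimi",
      conversation_id: conversationId, // "none" on the first round, then the returned id
      messages: [{ role: "user", content }],
    }),
  });
  return res.json();
}

const first = await chat("你好");
const second = await chat("我刚才说了什么?", first.id); // continues the same conversation
console.log(second.choices[0].message.content);
```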
## 注意事项
### Nginx反代优化
@@ -404,4 +519,4 @@ keepalive_timeout 120;
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=LLM-Red-Team/kimi-free-api&type=Date)](https://star-history.com/#LLM-Red-Team/kimi-free-api&Date)

504
README_EN.md Normal file

@ -0,0 +1,504 @@
# KIMI AI Free Service
<hr>
[![](https://img.shields.io/github/license/llm-red-team/kimi-free-api.svg)](LICENSE)
![](https://img.shields.io/github/stars/llm-red-team/kimi-free-api.svg)
![](https://img.shields.io/github/forks/llm-red-team/kimi-free-api.svg)
![](https://img.shields.io/docker/pulls/vinlic/kimi-free-api.svg)
Supports high-speed streaming output, multi-turn dialogues, internet search, long document reading, image analysis, zero-configuration deployment, multi-token support, and automatic session trace cleanup.
Fully compatible with the ChatGPT interface.
Also, the following free APIs are available for your attention:
StepFun (StepChat) API to API [step-free-api](https://github.com/LLM-Red-Team/step-free-api)
Ali Tongyi (Qwen) API to API [qwen-free-api](https://github.com/LLM-Red-Team/qwen-free-api)
ZhipuAI (ChatGLM) API to API [glm-free-api](https://github.com/LLM-Red-Team/glm-free-api)
Meta Sota (metaso) API to API [metaso-free-api](https://github.com/LLM-Red-Team/metaso-free-api)
Iflytek Spark (Spark) API to API [spark-free-api](https://github.com/LLM-Red-Team/spark-free-api)
Lingxin Intelligence (Emohaa) API to API [emohaa-free-api](https://github.com/LLM-Red-Team/emohaa-free-api) (OUT OF ORDER)
## Table of Contents
* [Announcement](#Announcement)
* [Online experience](#Online-Experience)
* [Effect Examples](#Effect-Examples)
* [Access preparation](#Access-Preparation)
* [Multiple account access](#Multi-Account-Access)
* [Docker Deployment](#Docker-Deployment)
* [Docker-compose Deployment](#Docker-compose-Deployment)
* [Render Deployment](#Render-Deployment)
* [Vercel Deployment](#Vercel-Deployment)
* [Zeabur Deployment](#Zeabur-Deployment)
* [Native Deployment](#Native-Deployment)
* [Interface List](#Interface-List)
* [Conversation completion](#conversation-completion)
* [Document Interpretation](#document-interpretation)
* [Image analysis](#image-analysis)
* [refresh_token survival detection](#refresh_token-survival-detection)
* [Precautions](#Precautions)
* [Nginx reverse proxy optimization](#Nginx-reverse-proxy-optimization)
* [Token statistics](#Token-statistics)
* [Star History](#star-history)
## Announcement
**This reverse-engineered API is unstable. We highly recommend using the official paid API at [MoonshotAI](https://platform.moonshot.cn/) instead, to avoid the risk of being banned.**
**This organization and individuals do not accept any financial donations and transactions. This project is purely for research, communication, and learning purposes!**
**For personal use only, it is forbidden to provide services or commercial use externally to avoid causing service pressure on the official, otherwise, bear the risk yourself!**
**For personal use only, it is forbidden to provide services or commercial use externally to avoid causing service pressure on the official, otherwise, bear the risk yourself!**
**For personal use only, it is forbidden to provide services or commercial use externally to avoid causing service pressure on the official, otherwise, bear the risk yourself!**
## Online Experience
This link is only for temporary testing of functions and cannot be used for a long time. For long-term use, please deploy by yourself.
https://udify.app/chat/Po0F6BMJ15q5vu2P
## Effect Examples
### Identity Verification
![Identity Verification](./doc/example-1.png)
### Multi-turn Dialogue
![Multi-turn Dialogue](./doc/example-6.png)
### Internet Search
![Internet Search](./doc/example-2.png)
### Long Document Reading
![Long Document Reading](./doc/example-5.png)
### Image Analysis
![Image Analysis](./doc/example-3.png)
### Consistent Responsiveness
![Consistent Responsiveness](https://github.com/LLM-Red-Team/kimi-free-api/assets/20235341/48c7ec00-2b03-46c4-95d0-452d3075219b)
## Access Preparation
Get the `refresh_token` from [kimi.moonshot.cn](https://kimi.moonshot.cn)
Start a conversation with kimi at will, then open the developer tool with F12, and find the value of `refresh_token` from Application > Local Storage, which will be used as the value of the Bearer Token in Authorization: `Authorization: Bearer TOKEN`
![example0](./doc/example-0.png)
If you see `refresh_token` as an array, please use `.` to join it before using.
![example8](./doc/example-8.jpg)
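A one-line sketch of that join (the array values are placeholders; adapt it to whatever you actually see in Local Storage):

```typescript
// Sketch: if the copied refresh_token value is an array, join it with "." before use.
const parts = ["eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9", "part2", "part3"]; // placeholder values
const refreshToken = parts.join(".");
```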
### Multi-Account Access
Currently, kimi limits ordinary accounts to only 30 rounds of long-text Q&A within every 3 hours (short text is unlimited). You can provide multiple account refresh_tokens and use `,` to join them:
`Authorization: Bearer TOKEN1,TOKEN2,TOKEN3`
The service will pick one each time a request is made.
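As an illustration of the format above, a tiny sketch of pooling several refresh_tokens in one Authorization header (the token values are placeholders); the service splits on `,` and, as noted, picks one per request:

```typescript
// Sketch: pool multiple refresh_tokens; kimi-free-api picks one of them per request.
const tokens = [
  "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.TOKEN1...",
  "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.TOKEN2...",
  "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.TOKEN3...",
];

const headers = {
  "Content-Type": "application/json",
  Authorization: `Bearer ${tokens.join(",")}`, // TOKEN1,TOKEN2,TOKEN3
};
```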
## Docker Deployment
Please prepare a server with a public IP and open port 8000.
Pull the image and start the service
```shell
docker run -it -d --init --name kimi-free-api -p 8000:8000 -e TZ=Asia/Shanghai vinlic/kimi-free-api:latest
```
check real-time service logs
```shell
docker logs -f kimi-free-api
```
Restart service
```shell
docker restart kimi-free-api
```
Shut down service
```shell
docker stop kimi-free-api
```
### Docker-compose Deployment
```yaml
version: '3'
services:
kimi-free-api:
container_name: kimi-free-api
image: vinlic/kimi-free-api:latest
restart: always
ports:
- "8000:8000"
environment:
- TZ=Asia/Shanghai
```
### Render Deployment
**Attention: Some deployment regions may not be able to connect to Kimi. If container logs show request timeouts or connection failures (Singapore has been tested and found unavailable), please switch to another deployment region!**
**Attention: Container instances on free accounts automatically stop after a period of inactivity, which may cause a delay of 50 seconds or more on the next request. It is recommended to check [Render Container Keepalive](https://github.com/LLM-Red-Team/free-api-hub/#Render%E5%AE%B9%E5%99%A8%E4%BF%9D%E6%B4%BB).**
1. Fork this project to your GitHub account.
2. Visit [Render](https://dashboard.render.com/) and log in with your GitHub account.
3. Build your Web Service (New+ -> Build and deploy from a Git repository -> Connect your forked project -> Select deployment region -> Choose instance type as Free -> Create Web Service).
4. After the build is complete, copy the assigned domain and append the URL to access it.
### Vercel Deployment
**Note: Vercel free accounts have a request response timeout of 10 seconds, but interface responses are usually longer, which may result in a 504 timeout error from Vercel!**
Please ensure that Node.js environment is installed first.
```shell
npm i -g vercel --registry http://registry.npmmirror.com
vercel login
git clone https://github.com/LLM-Red-Team/kimi-free-api
cd kimi-free-api
vercel --prod
```
### Zeabur Deployment
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/GRFYBP)
## Native Deployment
Please prepare a server with a public IP and open port 8000.
Please install the Node.js environment and configure the environment variables first, and confirm that the node command is available.
Install dependencies
```shell
npm i
```
Install PM2 for process guarding
```shell
npm i -g pm2
```
Compile and build. When you see the dist directory, the build is complete.
```shell
npm run build
```
Start service
```shell
pm2 start dist/index.js --name "kimi-free-api"
```
View real-time service logs
```shell
pm2 logs kimi-free-api
```
Restart service
```shell
pm2 reload kimi-free-api
```
Shut down service
```shell
pm2 stop kimi-free-api
```
## Recommended Clients
Using the following second-developed clients for free-api series projects is faster and easier, and supports document/image uploads!
[Clivia](https://github.com/Yanyutin753/lobe-chat)'s modified LobeChat [https://github.com/Yanyutin753/lobe-chat](https://github.com/Yanyutin753/lobe-chat)
[Time@](https://github.com/SuYxh)'s modified ChatGPT Web [https://github.com/SuYxh/chatgpt-web-sea](https://github.com/SuYxh/chatgpt-web-sea)
## Interface List
Currently the OpenAI-compatible `/v1/chat/completions` interface is supported. You can access it with any OpenAI-compatible client, or through an online service such as [dify](https://dify.ai/).
### Conversation completion
Conversation completion interface, compatible with openai's [chat-completions-api](https://platform.openai.com/docs/guides/text-generation/chat-completions-api).
**POST /v1/chat/completions**
The header needs to set the Authorization header:
```
Authorization: Bearer [refresh_token]
```
Request data:
```json
{
// Model name
// kimi: default model
// kimi-search: online search model
// kimi-research: exploration version model
// kimi-k1: K1 model
// kimi-math: math model
// kimi-silent: model without search process output
// search/research/k1/math/silent: can be freely combined
// If using kimi+agent, fill in the agent ID for model, which is the 20-character ID of letters and numbers at the end of the browser address bar
"model": "kimi",
"messages": [
{
"role": "user",
"content": "test"
}
],
// Whether to enable online search, default false
"use_search": true,
// If using SSE stream, please set it to true, the default is false
"stream": false
}
```
Response data:
```json
{
"id": "cnndivilnl96vah411dg",
"model": "kimi",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Hello! I am Kimi, an artificial intelligence assistant developed by Dark Side of the Moon Technology Co., Ltd. I am good at conversation in Chinese and English. I can help you obtain information, answer questions, and read and understand the documents you provide. and web content. If you have any questions or need help, feel free to let me know!"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 2
},
"created": 1710152062
}
```
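Putting the request and response formats above together, a minimal TypeScript client might look like the sketch below (non-streaming; the base URL and token are placeholders for your own deployment):

```typescript
// Sketch: non-streaming call to the OpenAI-compatible endpoint exposed by kimi-free-api.
// BASE_URL and REFRESH_TOKEN are placeholders.
const BASE_URL = "http://127.0.0.1:8000";
const REFRESH_TOKEN = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9...";

async function complete(prompt: string) {
  const res = await fetch(`${BASE_URL}/v1/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${REFRESH_TOKEN}`,
    },
    body: JSON.stringify({
      model: "kimi", // suffixes such as "-search" or "-silent" can be appended, as described above
      messages: [{ role: "user", content: prompt }],
      use_search: true,
      stream: false,
    }),
  });
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  const data = await res.json();
  return data.choices[0].message.content as string;
}

console.log(await complete("test"));
```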
### Document interpretation
Provide an accessible file URL or BASE64_URL to parse.
**POST /v1/chat/completions**
The header needs to set the Authorization header:
```
Authorization: Bearer [refresh_token]
```
Request data:
```json
{
// Model name
// kimi: default model
// kimi-search: online search model
// kimi-research: exploration version model
// kimi-k1: K1 model
// kimi-math: math model
// kimi-silent: model without search process output
// search/research/k1/math/silent: can be freely combined
// If using kimi+agent, fill in the agent ID for model, which is the 20-character ID of letters and numbers at the end of the browser address bar
"model": "kimi",
"messages": [
{
"role": "user",
"content": [
{
"type": "file",
"file_url": {
"url": "https://mj101-1317487292.cos.ap-shanghai.myqcloud.com/ai/test.pdf"
}
},
{
"type": "text",
"text": "What does the document say?"
}
]
}
],
// It is recommended to turn off online search to prevent interference in interpreting results.
"use_search": false
}
```
Response data:
```json
{
"id": "cnmuo7mcp7f9hjcmihn0",
"model": "kimi",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "The document contains several examples of ancient magical spells from magical texts from the ancient Greek and Roman periods known as PGM (Papyri Graecae Magicae). The following are examples of several spells mentioned in the document Contents:\n\n1. The first spell (PMG 4.1390 1495) describes a ritual that requires leaving some of your leftover bread, dividing it into seven small pieces, and then going to the heroes, gladiators, and those who died violent deaths The place where people were killed. Spell a spell on the piece of bread and throw it out, then pick up some contaminated soil from the ritual site and throw it into the home of the woman you like, then go to sleep. The content of the spell is to pray to the goddess of fate (Moirai), The Roman goddesses of Fates and the forces of nature (Daemons) were invoked to help make wishes come true.\n\n2. The second incantation (PMG 4.1342 57) was a summoning spell performed by speaking a series of mystical names and Words to summon a being called Daemon to cause a person named Tereous (born from Apia) to be mentally and emotionally tortured until she came to the spellcaster Didymos (born from Taipiam).\n \n3. The third spell (PGM 4.1265 74) mentions a mysterious name called NEPHERIĒRI, which is related to Aphrodite, the goddess of love. In order to win the heart of a beautiful woman, one needs to keep it for three days of purity, offer frankincense and recite the name while offering the offering. Then, as you approach the lady, recite the name silently seven times in your mind and do this for seven consecutive days with the hope of success.\n\n4. The fourth mantra ( PGM 4.1496 1) describes an incantation recited while burning myrrh. This incantation is a prayer to myrrh in the hope that it will attract a person named [name ] woman (her mother's name was [name]), making her unable to sit, eat, look at or kiss other people, but instead had only the caster in her mind until she came to the caster.\n\nThese Spells reflect ancient people's beliefs in magic and supernatural powers, and the ways in which they attempted to influence the emotions and behavior of others through these spells."
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 2
},
"created": 100920
}
```
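A sketch of the request above as TypeScript, combining a file URL and a question in one multimodal `content` array (URL, base URL, and token are placeholders):

```typescript
// Sketch: ask Kimi to read a remote document, mirroring the request body above.
const BASE_URL = "http://127.0.0.1:8000";
const REFRESH_TOKEN = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9...";

async function askAboutFile(fileUrl: string, question: string) {
  const res = await fetch(`${BASE_URL}/v1/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${REFRESH_TOKEN}`,
    },
    body: JSON.stringify({
      model: "kimi",
      messages: [
        {
          role: "user",
          content: [
            { type: "file", file_url: { url: fileUrl } },
            { type: "text", text: question },
          ],
        },
      ],
      // Recommended above: disable search so it does not interfere with interpretation.
      use_search: false,
    }),
  });
  return (await res.json()).choices[0].message.content as string;
}
```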
### Image analysis
Provide an accessible image URL or BASE64_URL to parse.
This format is compatible with the [gpt-4-vision-preview](https://platform.openai.com/docs/guides/vision) API format. You can also use this format to transmit documents for parsing.
**POST /v1/chat/completions**
The header needs to set the Authorization header:
```
Authorization: Bearer [refresh_token]
```
Request data:
```json
{
// Model name
// kimi: default model
// kimi-search: online search model
// kimi-research: exploration version model
// kimi-k1: K1 model
// kimi-math: math model
// kimi-silent: model without search process output
// search/research/k1/math/silent: can be freely combined
// If using kimi+agent, fill in the agent ID for model, which is the 20-character ID of letters and numbers at the end of the browser address bar
"model": "kimi",
"messages": [
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://www.moonshot.cn/assets/logo/normal-dark.png"
}
},
{
"type": "text",
"text": "What does the image describe?"
}
]
}
],
// It is recommended to turn off online search to prevent interference in interpreting results.
"use_search": false
}
```
Response data:
```json
{
"id": "cnn6l8ilnl92l36tu8ag",
"model": "kimi",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "The image shows the words \"Moonshot AI\", which may be the logo or brand identity of Dark Side of the Moon Technology Co., Ltd. (Moonshot AI). Usually such images are used to represent a company or product and convey brand information. Since the image is in PNG format, it could be a logo with a transparent background, used on a website, app, or other visual material."
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 2
},
"created": 1710123627
}
```
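Since the section above also accepts a BASE64_URL, here is a sketch of sending a local image as a base64 data URL. The data-URI form is an assumption based on the gpt-4-vision-compatible format mentioned above; paths, base URL, and token are placeholders:

```typescript
// Sketch: send a local image as a base64 data URL in gpt-4-vision-compatible format.
import { readFile } from "node:fs/promises";

const BASE_URL = "http://127.0.0.1:8000";
const REFRESH_TOKEN = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9...";

async function describeImage(path: string, mimeType = "image/png") {
  const base64 = (await readFile(path)).toString("base64");
  const res = await fetch(`${BASE_URL}/v1/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${REFRESH_TOKEN}`,
    },
    body: JSON.stringify({
      model: "kimi",
      messages: [
        {
          role: "user",
          content: [
            { type: "image_url", image_url: { url: `data:${mimeType};base64,${base64}` } },
            { type: "text", text: "What does the image describe?" },
          ],
        },
      ],
      use_search: false,
    }),
  });
  return (await res.json()).choices[0].message.content as string;
}
```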
### refresh_token survival detection
Check whether the refresh_token is alive: `live` is true if it is alive, otherwise false. Please do not call this interface more often than once every 10 minutes.
**POST /token/check**
Request data:
```json
{
"token": "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9..."
}
```
Response data:
```json
{
"live": true
}
```
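A small sketch of a liveness probe against the endpoint above; per the note, it should not run more often than every 10 minutes (the base URL and token are placeholders):

```typescript
// Sketch: periodically verify that a refresh_token is still alive via /token/check.
const BASE_URL = "http://127.0.0.1:8000";

async function isTokenAlive(token: string): Promise<boolean> {
  const res = await fetch(`${BASE_URL}/token/check`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ token }),
  });
  const { live } = await res.json();
  return live === true;
}

// Check at most once every 10 minutes, as recommended above.
setInterval(async () => {
  const alive = await isTokenAlive("eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9...");
  if (!alive) console.warn("refresh_token has expired; replace it");
}, 10 * 60 * 1000);
```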
## Precautions
### Nginx reverse proxy optimization
If you are using Nginx reverse proxy kimi-free-api, please add the following configuration items to optimize the output effect of the stream and optimize the experience.
```nginx
# Turn off proxy buffering. When set to off, Nginx will immediately send client requests to the backend server and immediately send responses received from the backend server back to the client.
proxy_buffering off;
# Enable chunked transfer encoding. Chunked transfer encoding allows servers to send data in chunks for dynamically generated content without knowing the size of the content in advance.
chunked_transfer_encoding on;
# Turn on TCP_NOPUSH, which tells Nginx to send as much data as possible before sending the packet to the client. This is usually used in conjunction with sendfile to improve network efficiency.
tcp_nopush on;
# Turn on TCP_NODELAY, which tells Nginx not to delay sending data and to send small data packets immediately. In some cases, this can reduce network latency.
tcp_nodelay on;
#Set the timeout to keep the connection, here it is set to 120 seconds. If there is no further communication between client and server during this time, the connection will be closed.
keepalive_timeout 120;
```
### Token statistics
Since inference does not happen inside kimi-free-api, tokens cannot be counted and a fixed number is returned instead!
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=LLM-Red-Team/kimi-free-api&type=Date)](https://star-history.com/#LLM-Red-Team/kimi-free-api&Date)

BIN
doc/example-7.png Normal file

Binary file not shown.

Size: 44 KiB

package.json

@@ -1,6 +1,6 @@
{
  "name": "kimi-free-api",
-  "version": "0.0.19",
+  "version": "0.0.36",
  "description": "Kimi Free API Server",
  "type": "module",
  "main": "dist/index.js",
@@ -13,8 +13,8 @@
    "dist/"
  ],
  "scripts": {
-    "dev": "tsup src/index.ts --format cjs,esm --sourcemap --dts --publicDir public --watch --onSuccess \"node dist/index.js\"",
-    "start": "node dist/index.js",
+    "dev": "tsup src/index.ts --format cjs,esm --sourcemap --dts --publicDir public --watch --onSuccess \"node --enable-source-maps --no-node-snapshot dist/index.js\"",
+    "start": "node --enable-source-maps --no-node-snapshot dist/index.js",
    "build": "tsup src/index.ts --format cjs,esm --sourcemap --dts --clean --publicDir public"
  },
  "author": "Vinlic",

10
public/welcome.html Normal file

@ -0,0 +1,10 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>🚀 服务已启动</title>
</head>
<body>
<p>kimi-free-api已启动<br>请通过LobeChat / NextChat / Dify等客户端或OpenAI SDK接入</p>
</body>
</html>

src/api/consts/exceptions.ts

@@ -5,5 +5,6 @@ export default {
  API_TOKEN_EXPIRES: [-2002, 'Token已失效'],
  API_FILE_URL_INVALID: [-2003, '远程文件URL非法'],
  API_FILE_EXECEEDS_SIZE: [-2004, '远程文件超出大小'],
-  API_CHAT_STREAM_PUSHING: [-2005, '已有对话流正在输出']
+  API_CHAT_STREAM_PUSHING: [-2005, '已有对话流正在输出'],
+  API_RESEARCH_EXCEEDS_LIMIT: [-2006, '探索版使用量已达到上限']
}
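The new `API_RESEARCH_EXCEEDS_LIMIT` code (-2006) is thrown when the exploration-version quota is exhausted. A condensed sketch of that usage, mirroring the chat controller diff further below (the `getResearchUsage` helper is assumed to wrap GET /api/chat/research/usage as shown there):

```typescript
// Sketch: how the new -2006 code is used when the exploration-version quota runs out.
import APIException from "@/lib/exceptions/APIException.ts";
import EX from "@/api/consts/exceptions.ts";

declare function getResearchUsage(refreshToken: string): Promise<{ remain: number; total: number; used: number }>;

async function assertResearchQuota(refreshToken: string) {
  const { total, used } = await getResearchUsage(refreshToken);
  if (used >= total)
    throw new APIException(EX.API_RESEARCH_EXCEEDS_LIMIT, "探索版使用量已达到上限");
  return { total, used };
}
```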

src/api/controllers/chat.ts

@@ -2,8 +2,9 @@ import { PassThrough } from "stream";
import path from 'path';
import _ from 'lodash';
import mime from 'mime';
-import axios, { AxiosResponse } from 'axios';
+import axios, { AxiosRequestConfig, AxiosResponse } from 'axios';
+import type IStreamMessage from "../interfaces/IStreamMessage.ts";
import APIException from "@/lib/exceptions/APIException.ts";
import EX from "@/api/consts/exceptions.ts";
import { createParser } from 'eventsource-parser'
@ -12,27 +13,39 @@ import util from '@/lib/util.ts';
// 模型名称 // 模型名称
const MODEL_NAME = 'kimi'; const MODEL_NAME = 'kimi';
// 设备ID
const DEVICE_ID = Math.random() * 999999999999999999 + 7000000000000000000;
// SessionID
const SESSION_ID = Math.random() * 99999999999999999 + 1700000000000000000;
// access_token有效期 // access_token有效期
const ACCESS_TOKEN_EXPIRES = 300; const ACCESS_TOKEN_EXPIRES = 300;
// 最大重试次数 // 最大重试次数
const MAX_RETRY_COUNT = 3; const MAX_RETRY_COUNT = 3;
// 重试延迟 // 重试延迟
const RETRY_DELAY = 5000; const RETRY_DELAY = 5000;
// 基础URL
const BASE_URL = 'https://kimi.moonshot.cn';
// 伪装headers // 伪装headers
const FAKE_HEADERS = { const FAKE_HEADERS = {
'Accept': '*/*', 'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br, zstd', 'Accept-Encoding': 'gzip, deflate, br, zstd',
'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
'Origin': 'https://kimi.moonshot.cn', 'Cache-Control': 'no-cache',
// 'Cookie': util.generateCookie(), 'Pragma': 'no-cache',
'Origin': BASE_URL,
'Cookie': util.generateCookie(),
'R-Timezone': 'Asia/Shanghai', 'R-Timezone': 'Asia/Shanghai',
'Sec-Ch-Ua': '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"', 'Sec-Ch-Ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
'Sec-Ch-Ua-Mobile': '?0', 'Sec-Ch-Ua-Mobile': '?0',
'Sec-Ch-Ua-Platform': '"Windows"', 'Sec-Ch-Ua-Platform': '"Windows"',
'Sec-Fetch-Dest': 'empty', 'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors', 'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin', 'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36' 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
'Priority': 'u=1, i',
'X-Msh-Device-Id': `${DEVICE_ID}`,
'X-Msh-Platform': 'web',
'X-Msh-Session-Id': `${SESSION_ID}`
}; };
// 文件最大大小 // 文件最大大小
const FILE_MAX_SIZE = 100 * 1024 * 1024; const FILE_MAX_SIZE = 100 * 1024 * 1024;
@ -54,11 +67,10 @@ async function requestToken(refreshToken: string) {
accessTokenRequestQueueMap[refreshToken] = []; accessTokenRequestQueueMap[refreshToken] = [];
logger.info(`Refresh token: ${refreshToken}`); logger.info(`Refresh token: ${refreshToken}`);
const result = await (async () => { const result = await (async () => {
const result = await axios.get('https://kimi.moonshot.cn/api/auth/token/refresh', { const result = await axios.get(`${BASE_URL}/api/auth/token/refresh`, {
headers: { headers: {
Authorization: `Bearer ${refreshToken}`, Authorization: `Bearer ${refreshToken}`,
Referer: 'https://kimi.moonshot.cn/', ...FAKE_HEADERS,
...FAKE_HEADERS
}, },
timeout: 15000, timeout: 15000,
validateStatus: () => true validateStatus: () => true
@ -67,7 +79,18 @@ async function requestToken(refreshToken: string) {
access_token, access_token,
refresh_token refresh_token
} = checkResult(result, refreshToken); } = checkResult(result, refreshToken);
const userResult = await axios.get(`${BASE_URL}/api/user`, {
headers: {
Authorization: `Bearer ${access_token}`,
...FAKE_HEADERS,
},
timeout: 15000,
validateStatus: () => true
});
if(!userResult.data.id)
throw new APIException(EX.API_REQUEST_FAILED, '获取用户信息失败');
return { return {
userId: userResult.data.id,
accessToken: access_token, accessToken: access_token,
refreshToken: refresh_token, refreshToken: refresh_token,
refreshTime: util.unixTimestamp() + ACCESS_TOKEN_EXPIRES refreshTime: util.unixTimestamp() + ACCESS_TOKEN_EXPIRES
@ -82,6 +105,7 @@ async function requestToken(refreshToken: string) {
return result; return result;
}) })
.catch(err => { .catch(err => {
logger.error(err);
if (accessTokenRequestQueueMap[refreshToken]) { if (accessTokenRequestQueueMap[refreshToken]) {
accessTokenRequestQueueMap[refreshToken].forEach(resolve => resolve(err)); accessTokenRequestQueueMap[refreshToken].forEach(resolve => resolve(err));
delete accessTokenRequestQueueMap[refreshToken]; delete accessTokenRequestQueueMap[refreshToken];
@ -100,7 +124,7 @@ async function requestToken(refreshToken: string) {
* *
* @param refreshToken access_token的refresh_token * @param refreshToken access_token的refresh_token
*/ */
async function acquireToken(refreshToken: string): Promise<string> { async function acquireToken(refreshToken: string): Promise<any> {
let result = accessTokenMap.get(refreshToken); let result = accessTokenMap.get(refreshToken);
if (!result) { if (!result) {
result = await requestToken(refreshToken); result = await requestToken(refreshToken);
@ -110,7 +134,39 @@ async function acquireToken(refreshToken: string): Promise<string> {
result = await requestToken(refreshToken); result = await requestToken(refreshToken);
accessTokenMap.set(refreshToken, result); accessTokenMap.set(refreshToken, result);
} }
return result.accessToken; return result;
}
/**
*
*/
export async function request(
method: string,
uri: string,
refreshToken: string,
options: AxiosRequestConfig = {}
) {
const {
accessToken,
userId
} = await acquireToken(refreshToken);
logger.info(`url: ${uri}`);
const result = await axios({
method,
url: `${BASE_URL}${uri}`,
params: options.params,
data: options.data,
headers: {
Authorization: `Bearer ${accessToken}`,
'X-Traffic-Id': userId,
...FAKE_HEADERS,
...(options.headers || {})
},
timeout: options.timeout || 15000,
responseType: options.responseType,
validateStatus: () => true
});
return checkResult(result, refreshToken);
} }
/** /**
@ -120,23 +176,17 @@ async function acquireToken(refreshToken: string): Promise<string> {
* *
* @param refreshToken access_token的refresh_token * @param refreshToken access_token的refresh_token
*/ */
async function createConversation(name: string, refreshToken: string) { async function createConversation(model: string, name: string, refreshToken: string) {
const token = await acquireToken(refreshToken);
const result = await axios.post('https://kimi.moonshot.cn/api/chat', {
name,
is_example: false
}, {
headers: {
Authorization: `Bearer ${token}`,
Referer: 'https://kimi.moonshot.cn/',
...FAKE_HEADERS
},
timeout: 15000,
validateStatus: () => true
});
const { const {
id: convId id: convId
} = checkResult(result, refreshToken); } = await request('POST', '/api/chat', refreshToken, {
data: {
enter_method: 'new_chat',
is_example: false,
kimiplus_id: /^[0-9a-z]{20}$/.test(model) ? model : 'kimi',
name
}
});
return convId; return convId;
} }
@ -148,17 +198,77 @@ async function createConversation(name: string, refreshToken: string) {
* @param refreshToken access_token的refresh_token * @param refreshToken access_token的refresh_token
*/ */
async function removeConversation(convId: string, refreshToken: string) { async function removeConversation(convId: string, refreshToken: string) {
const token = await acquireToken(refreshToken); return await request('DELETE', `/api/chat/${convId}`, refreshToken);
const result = await axios.delete(`https://kimi.moonshot.cn/api/chat/${convId}`, { }
headers: {
Authorization: `Bearer ${token}`, /**
Referer: `https://kimi.moonshot.cn/chat/${convId}`, *
...FAKE_HEADERS *
}, * @param refreshToken access_token的refresh_token
timeout: 15000, */
validateStatus: () => true async function getSuggestion(query: string, refreshToken: string) {
return await request('POST', '/api/suggestion', refreshToken, {
data: {
offset: 0,
page_referer: 'chat',
query: query.replace('user:', '').replace('assistant:', ''),
scene: 'first_round',
size: 10
}
}); });
checkResult(result, refreshToken); }
/**
* N2S
*
* N2S
*
* @param model
* @param messages gpt系列消息格式
* @param refs ID列表
* @param refreshToken access_token的refresh_token
* @param refConvId ID
*/
async function preN2s(model: string, messages: { role: string, content: string }[], refs: string[], refreshToken: string, refConvId?: string) {
const isSearchModel = model.indexOf('search') != -1;
return await request('POST', `/api/chat/${refConvId}/pre-n2s`, refreshToken, {
data: {
is_pro_search: false,
kimiplus_id: /^[0-9a-z]{20}$/.test(model) ? model : 'kimi',
messages,
refs,
use_search: isSearchModel
}
});
}
/**
* token计数
*
* @param query
* @param refreshToken access_token的refresh_token
* @param refConvId ID
*/
async function tokenSize(query: string, refs: string[], refreshToken: string, refConvId: string) {
return await request('POST', `/api/chat/${refConvId}/token_size`, refreshToken, {
data: {
content: query,
refs: []
}
});
}
/**
* 使
*
* @param refreshToken access_token的refresh_token
*/
async function getResearchUsage(refreshToken: string): Promise<{
remain,
total,
used
}> {
return await request('GET', '/api/chat/research/usage', refreshToken);
} }
/** /**
@ -167,49 +277,110 @@ async function removeConversation(convId: string, refreshToken: string) {
* @param model * @param model
* @param messages gpt系列消息格式 * @param messages gpt系列消息格式
* @param refreshToken access_token的refresh_token * @param refreshToken access_token的refresh_token
* @param useSearch * @param refConvId ID
* @param retryCount * @param retryCount
*/ */
async function createCompletion(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, retryCount = 0) { async function createCompletion(model = MODEL_NAME, messages: any[], refreshToken: string, refConvId?: string, retryCount = 0, segmentId?: string): Promise<IStreamMessage> {
return (async () => { return (async () => {
logger.info(messages); logger.info(messages);
// 创建会话
const convId = /[0-9a-zA-Z]{20}/.test(refConvId) ? refConvId : await createConversation(model, "未命名会话", refreshToken);
// 提取引用文件URL并上传kimi获得引用的文件ID列表 // 提取引用文件URL并上传kimi获得引用的文件ID列表
const refFileUrls = extractRefFileUrls(messages); const refFileUrls = extractRefFileUrls(messages);
const refs = refFileUrls.length ? await Promise.all(refFileUrls.map(fileUrl => uploadFile(fileUrl, refreshToken))) : []; const refResults = refFileUrls.length ? await Promise.all(refFileUrls.map(fileUrl => uploadFile(fileUrl, refreshToken, convId))) : [];
const refs = refResults.map(result => result.id);
const refsFile = refResults.map(result => ({
detail: result,
done: true,
file: {},
file_info: result,
id: result.id,
name: result.name,
parse_status: 'success',
size: result.size,
upload_progress: 100,
upload_status: 'success'
}));
// 伪装调用获取用户信息 // 伪装调用获取用户信息
fakeRequest(refreshToken) fakeRequest(refreshToken)
.catch(err => logger.error(err)); .catch(err => logger.error(err));
// 创建会话 // 消息预处理
const convId = await createConversation(`cmpl-${util.uuid(false)}`, refreshToken); const sendMessages = messagesPrepare(messages, !!refConvId);
// 请求流 !segmentId && preN2s(model, sendMessages, refs, refreshToken, convId)
const token = await acquireToken(refreshToken); .catch(err => logger.error(err));
const result = await axios.post(`https://kimi.moonshot.cn/api/chat/${convId}/completion/stream`, { getSuggestion(sendMessages[0].content, refreshToken)
messages: messagesPrepare(messages), .catch(err => logger.error(err));
refs, tokenSize(sendMessages[0].content, refs, refreshToken, convId)
use_search: useSearch .catch(err => logger.error(err));
}, {
headers: { const isMath = model.indexOf('math') != -1;
Authorization: `Bearer ${token}`, const isSearchModel = model.indexOf('search') != -1;
Referer: `https://kimi.moonshot.cn/chat/${convId}`, const isResearchModel = model.indexOf('research') != -1;
...FAKE_HEADERS const isK1Model = model.indexOf('k1') != -1;
logger.info(`使用模型: ${model},是否联网检索: ${isSearchModel},是否探索版: ${isResearchModel}是否K1模型: ${isK1Model},是否数学模型: ${isMath}`);
if(segmentId)
logger.info(`继续请求segmentId: ${segmentId}`);
// 检查探索版使用量
if(isResearchModel) {
const {
total,
used
} = await getResearchUsage(refreshToken);
if(used >= total)
throw new APIException(EX.API_RESEARCH_EXCEEDS_LIMIT, `探索版使用量已达到上限`);
logger.info(`探索版当前额度: ${used}/${total}`);
}
const kimiplusId = isK1Model ? 'crm40ee9e5jvhsn7ptcg' : (/^[0-9a-z]{20}$/.test(model) ? model : 'kimi');
// 请求补全流
const stream = await request('POST', `/api/chat/${convId}/completion/stream`, refreshToken, {
data: segmentId ? {
segment_id: segmentId,
action: 'continue',
messages: [{ role: 'user', content: ' ' }],
kimiplus_id: kimiplusId,
extend: { sidebar: true }
} : {
kimiplus_id: kimiplusId,
messages: sendMessages,
refs,
refs_file: refsFile,
use_math: isMath,
use_research: isResearchModel,
use_search: isSearchModel,
extend: { sidebar: true }
},
headers: {
Referer: `https://kimi.moonshot.cn/chat/${convId}`
}, },
// 120秒超时
timeout: 120000,
validateStatus: () => true,
responseType: 'stream' responseType: 'stream'
}); });
const streamStartTime = util.timestamp(); const streamStartTime = util.timestamp();
// 接收流为输出文本 // 接收流为输出文本
const answer = await receiveStream(model, convId, result.data); const answer = await receiveStream(model, convId, stream);
// 如果上次请求生成长度超限,则继续请求
if(answer.choices[0].finish_reason == 'length' && answer.segment_id) {
const continueAnswer = await createCompletion(model, [], refreshToken, convId, retryCount, answer.segment_id);
answer.choices[0].message.content += continueAnswer.choices[0].message.content;
}
logger.success(`Stream has completed transfer ${util.timestamp() - streamStartTime}ms`); logger.success(`Stream has completed transfer ${util.timestamp() - streamStartTime}ms`);
// 异步移除会话,如果消息不合规,此操作可能会抛出数据库错误异常,请忽略 // 异步移除会话,如果消息不合规,此操作可能会抛出数据库错误异常,请忽略
removeConversation(convId, refreshToken) // 如果引用会话将不会清除,因为我们不知道什么时候你会结束会话
!refConvId && removeConversation(convId, refreshToken)
.catch(err => console.error(err)); .catch(err => console.error(err));
return answer; return answer;
@ -220,7 +391,7 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
logger.warn(`Try again after ${RETRY_DELAY / 1000}s...`); logger.warn(`Try again after ${RETRY_DELAY / 1000}s...`);
return (async () => { return (async () => {
await new Promise(resolve => setTimeout(resolve, RETRY_DELAY)); await new Promise(resolve => setTimeout(resolve, RETRY_DELAY));
return createCompletion(model, messages, refreshToken, useSearch, retryCount + 1); return createCompletion(model, messages, refreshToken, refConvId, retryCount + 1);
})(); })();
} }
throw err; throw err;
@ -233,47 +404,91 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
* @param model * @param model
* @param messages gpt系列消息格式 * @param messages gpt系列消息格式
* @param refreshToken access_token的refresh_token * @param refreshToken access_token的refresh_token
* @param useSearch * @param refConvId ID
* @param retryCount * @param retryCount
*/ */
async function createCompletionStream(model = MODEL_NAME, messages: any[], refreshToken: string, refConvId?: string, retryCount = 0) {
    return (async () => {
        logger.info(messages);

        // Create the conversation (reuse it if refConvId looks like a valid conversation id)
        const convId = /[0-9a-zA-Z]{20}/.test(refConvId) ? refConvId : await createConversation(model, "未命名会话", refreshToken);

        // Extract referenced file URLs and upload them to kimi to obtain the referenced file ID list
        const refFileUrls = extractRefFileUrls(messages);
        const refResults = refFileUrls.length ? await Promise.all(refFileUrls.map(fileUrl => uploadFile(fileUrl, refreshToken, convId))) : [];
        const refs = refResults.map(result => result.id);
        const refsFile = refResults.map(result => ({
            detail: result,
            done: true,
            file: {},
            file_info: result,
            id: result.id,
            name: result.name,
            parse_status: 'success',
            size: result.size,
            upload_progress: 100,
            upload_status: 'success'
        }));

        // Fake request to fetch user info (mimics the web client)
        fakeRequest(refreshToken)
            .catch(err => logger.error(err));

        const sendMessages = messagesPrepare(messages, !!refConvId);

        preN2s(model, sendMessages, refs, refreshToken, convId)
            .catch(err => logger.error(err));
        getSuggestion(sendMessages[0].content, refreshToken)
            .catch(err => logger.error(err));
        tokenSize(sendMessages[0].content, refs, refreshToken, convId)
            .catch(err => logger.error(err));

        const isMath = model.indexOf('math') != -1;
        const isSearchModel = model.indexOf('search') != -1;
        const isResearchModel = model.indexOf('research') != -1;
        const isK1Model = model.indexOf('k1') != -1;

        logger.info(`使用模型: ${model},是否联网检索: ${isSearchModel},是否探索版: ${isResearchModel},是否K1模型: ${isK1Model},是否数学模型: ${isMath}`);

        // Check the research (探索版) usage quota
        if (isResearchModel) {
            const {
                total,
                used
            } = await getResearchUsage(refreshToken);
            if (used >= total)
                throw new APIException(EX.API_RESEARCH_EXCEEDS_LIMIT, `探索版使用量已达到上限`);
            logger.info(`探索版当前额度: ${used}/${total}`);
        }

        const kimiplusId = isK1Model ? 'crm40ee9e5jvhsn7ptcg' : (/^[0-9a-z]{20}$/.test(model) ? model : 'kimi');

        // Request the completion stream
        const stream = await request('POST', `/api/chat/${convId}/completion/stream`, refreshToken, {
            data: {
                kimiplus_id: kimiplusId,
                messages: sendMessages,
                refs,
                refs_file: refsFile,
                use_math: isMath,
                use_research: isResearchModel,
                use_search: isSearchModel,
                extend: { sidebar: true }
            },
            headers: {
                Referer: `https://kimi.moonshot.cn/chat/${convId}`
            },
            responseType: 'stream'
        });
        const streamStartTime = util.timestamp();
        // Create a transform stream that converts messages into the GPT-compatible format
        return createTransStream(model, convId, stream, () => {
            logger.success(`Stream has completed transfer ${util.timestamp() - streamStartTime}ms`);
            // Remove the conversation asynchronously once streaming ends; if the message was flagged,
            // this may throw a database error which can be ignored.
            // A referenced conversation is not removed, since we cannot know when you will end it.
            !refConvId && removeConversation(convId, refreshToken)
                .catch(err => console.error(err));
        });
    })()
@@ -283,7 +498,7 @@ async function createCompletionStream(model = MODEL_NAME, messages: any[], refre

        logger.warn(`Try again after ${RETRY_DELAY / 1000}s...`);
        return (async () => {
            await new Promise(resolve => setTimeout(resolve, RETRY_DELAY));
            return createCompletionStream(model, messages, refreshToken, refConvId, retryCount + 1);
        })();
    }
    throw err;
@@ -298,28 +513,29 @@ async function createCompletionStream(model = MODEL_NAME, messages: any[], refre

 * @param refreshToken refresh_token of the access_token
 */
async function fakeRequest(refreshToken: string) {
    await [
        () => request('GET', '/api/user', refreshToken),
        () => request('POST', '/api/user/usage', refreshToken, {
            data: {
                usage: ['kimiv', 'math']
            }
        }),
        () => request('GET', '/api/chat_1m/user/status', refreshToken),
        () => request('GET', '/api/kimi_mv/user/status', refreshToken),
        () => request('POST', '/api/kimiplus/history', refreshToken),
        () => request('POST', '/api/kimiplus/search', refreshToken, {
            data: {
                offset: 0,
                size: 20
            }
        }),
        () => request('POST', '/api/chat/list', refreshToken, {
            data: {
                offset: 0,
                size: 50
            }
        }),
    ][Math.floor(Math.random() * 7)]();
}
/**

@@ -347,6 +563,7 @@ function extractRefFileUrls(messages: any[]) {

                urls.push(v['image_url']['url']);
            });
        }
    logger.info("本次请求上传:" + urls.length + "个文件");
    return urls;
}
@@ -359,26 +576,55 @@ function extractRefFileUrls(messages: any[]) {

 * user: the latest message
 *
 * @param messages messages in the GPT-series format
 * @param isRefConv
 */
function messagesPrepare(messages: any[], isRefConv = false) {
    let content;
    if (isRefConv || messages.length < 2) {
        content = messages.reduce((content, message) => {
            if (_.isArray(message.content)) {
                return message.content.reduce((_content, v) => {
                    if (!_.isObject(v) || v['type'] != 'text') return _content;
                    return _content + `${v["text"] || ""}\n`;
                }, content);
            }
            return content += `${message.role == 'user' ? wrapUrlsToTags(message.content) : message.content}\n`;
        }, '')
        logger.info("\n透传内容\n" + content);
    }
    else {
        // Inject a message to focus the model's attention
        let latestMessage = messages[messages.length - 1];
        let hasFileOrImage = Array.isArray(latestMessage.content)
            && latestMessage.content.some(v => (typeof v === 'object' && ['file', 'image_url'].includes(v['type'])));
        // From the second round on, inject a system prompt
        if (hasFileOrImage) {
            let newFileMessage = {
                "content": "关注用户最新发送文件和消息",
                "role": "system"
            };
            messages.splice(messages.length - 1, 0, newFileMessage);
            logger.info("注入提升尾部文件注意力system prompt");
        } else {
            let newTextMessage = {
                "content": "关注用户最新的消息",
                "role": "system"
            };
            messages.splice(messages.length - 1, 0, newTextMessage);
            logger.info("注入提升尾部消息注意力system prompt");
        }
        content = messages.reduce((content, message) => {
            if (_.isArray(message.content)) {
                return message.content.reduce((_content, v) => {
                    if (!_.isObject(v) || v['type'] != 'text') return _content;
                    return _content + `${message.role || "user"}:${v["text"] || ""}\n`;
                }, content);
            }
            return content += `${message.role || "user"}:${message.role == 'user' ? wrapUrlsToTags(message.content) : message.content}\n`;
        }, '')
        logger.info("\n对话合并\n" + content);
    }

    return [
        { role: 'user', content }
    ]
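To illustrate the merge behavior above, here is a rough sketch of what messagesPrepare produces for a multi-turn GPT-style history; the sample messages are hypothetical, while the injected system prompt and the "role:content" flattening come from the code itself.

// Hypothetical input (not from the repository), isRefConv = false:
const history = [
    { role: 'user', content: '你好' },
    { role: 'assistant', content: '你好!有什么可以帮你?' },
    { role: 'user', content: '介绍一下月之暗面' }
];

// Because there is more than one message, a system prompt is spliced in before the
// last message and every turn is flattened into one "role:content" line:
//
//   user:你好
//   assistant:你好!有什么可以帮你?
//   system:关注用户最新的消息
//   user:介绍一下月之暗面
//
// messagesPrepare() then returns that single merged string as one user message:
//   [{ role: 'user', content: '...merged text above...' }]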
@@ -401,16 +647,20 @@ function wrapUrlsToTags(content: string) {

 * @param filename
 * @param refreshToken refresh_token of the access_token
 */
async function preSignUrl(action: string, filename: string, refreshToken: string) {
    const {
        accessToken,
        userId
    } = await acquireToken(refreshToken);
    const result = await axios.post('https://kimi.moonshot.cn/api/pre-sign-url', {
        action,
        name: filename
    }, {
        timeout: 15000,
        headers: {
            Authorization: `Bearer ${accessToken}`,
            Referer: `https://kimi.moonshot.cn/`,
            'X-Traffic-Id': userId,
            ...FAKE_HEADERS
        },
        validateStatus: () => true
@@ -445,8 +695,9 @@ async function checkFileUrl(fileUrl: string) {

 *
 * @param fileUrl URL
 * @param refreshToken refresh_token of the access_token
 * @param refConvId ID
 */
async function uploadFile(fileUrl: string, refreshToken: string, refConvId?: string) {
    // Pre-check that the remote file URL is reachable
    await checkFileUrl(fileUrl);

@@ -470,63 +721,93 @@ async function uploadFile(fileUrl: string, refreshToken: string) {

        }));
    }

    const fileType = (mimeType || '').includes('image') ? 'image' : 'file';

    // Get the pre-signed upload URL
    let {
        url: uploadUrl,
        object_name: objectName,
        file_id: fileId
    } = await preSignUrl(fileType, filename, refreshToken);

    // Determine the file's MIME type
    mimeType = mimeType || mime.getType(filename);

    // Upload the file to the target OSS bucket
    const {
        accessToken,
        userId
    } = await acquireToken(refreshToken);
    let result = await axios.request({
        method: 'PUT',
        url: uploadUrl,
        data: fileData,
        // 100M size limit
        maxBodyLength: FILE_MAX_SIZE,
        // 120-second timeout
        timeout: 120000,
        headers: {
            'Content-Type': mimeType,
            Authorization: `Bearer ${accessToken}`,
            Referer: `https://kimi.moonshot.cn/`,
            'X-Traffic-Id': userId,
            ...FAKE_HEADERS
        },
        validateStatus: () => true
    });
    checkResult(result, refreshToken);

    let status, startTime = Date.now();
    let fileDetail;
    while (status != 'initialized' && status != 'parsed') {
        if (Date.now() - startTime > 30000)
            throw new Error('文件等待处理超时');
        // Fetch the file upload result
        result = await axios.post('https://kimi.moonshot.cn/api/file', fileType == 'image' ? {
            type: 'image',
            file_id: fileId,
            name: filename
        } : {
            type: 'file',
            name: filename,
            object_name: objectName,
            file_id: '',
            chat_id: refConvId
        }, {
            headers: {
                Authorization: `Bearer ${accessToken}`,
                Referer: `https://kimi.moonshot.cn/`,
                'X-Traffic-Id': userId,
                ...FAKE_HEADERS
            }
        });
        fileDetail = checkResult(result, refreshToken);
        ({ id: fileId, status } = fileDetail);
    }

    startTime = Date.now();
    let parseFinish = status == 'parsed';
    while (!parseFinish) {
        if (Date.now() - startTime > 30000)
            throw new Error('文件等待处理超时');
        // Wait for file parsing/conversion to finish
        parseFinish = await new Promise(resolve => {
            axios.post('https://kimi.moonshot.cn/api/file/parse_process', {
                ids: [fileId],
                timeout: 120000
            }, {
                headers: {
                    Authorization: `Bearer ${accessToken}`,
                    Referer: `https://kimi.moonshot.cn/`,
                    'X-Traffic-Id': userId,
                    ...FAKE_HEADERS
                }
            })
                .then(() => resolve(true))
                .catch(() => resolve(false));
        });
    }

    return fileDetail;
}
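A minimal sketch of how the upload result above feeds the completion request; the file URL is illustrative, and refreshToken / convId are assumed to be in scope as in createCompletionStream.

// Illustrative only: uploadFile() resolves with the file detail object returned by
// /api/file once its status reaches 'initialized' or 'parsed'.
const detail = await uploadFile('https://example.com/report.pdf', refreshToken, convId);

// The completion functions then use the id for `refs` and the detail object for `refs_file`:
const refs = [detail.id];
const refsFile = [{
    detail,
    done: true,
    file: {},
    file_info: detail,
    id: detail.id,
    name: detail.name,
    parse_status: 'success',
    size: detail.size,
    upload_progress: 100,
    upload_status: 'success'
}];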
/**

@@ -559,7 +840,9 @@ function checkResult(result: AxiosResponse, refreshToken: string) {

 * @param convId ID
 * @param stream
 */
async function receiveStream(model: string, convId: string, stream: any): Promise<IStreamMessage> {
    let webSearchCount = 0;
    let temp = Buffer.from('');
    return new Promise((resolve, reject) => {
        // Initialize the message
        const data = {

@@ -570,10 +853,11 @@ async function receiveStream(model: string, convId: string, stream: any) {

                { index: 0, message: { role: 'assistant', content: '' }, finish_reason: 'stop' }
            ],
            usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
            segment_id: '',
            created: util.unixTimestamp()
        };
        let refContent = '';
        const silentSearch = model.indexOf('silent') != -1;
        const parser = createParser(event => {
            try {
                if (event.type !== "event") return;

@@ -583,8 +867,16 @@ async function receiveStream(model: string, convId: string, stream: any) {

                    throw new Error(`Stream response invalid: ${event.data}`);
                // Handle message text
                if (result.event == 'cmpl' && result.text) {
                    data.choices[0].message.content += result.text;
                }
                // Handle the request ID
                else if (result.event == 'req') {
                    data.segment_id = result.id;
                }
                // Handle over-long output
                else if (result.event == 'length') {
                    logger.warn('此次生成达到max_tokens,稍候将继续请求拼接完整响应');
                    data.choices[0].finish_reason = 'length';
                }
                // Handle completion or error
                else if (result.event == 'all_done' || result.event == 'error') {

@@ -593,8 +885,10 @@ async function receiveStream(model: string, convId: string, stream: any) {

                    resolve(data);
                }
                // Handle web search results
                else if (!silentSearch && result.event == 'search_plus' && result.msg && result.msg.type == 'get_res') {
                    webSearchCount += 1;
                    refContent += `【检索 ${webSearchCount}】 [${result.msg.title}](${result.msg.url})\n\n`;
                }
                // else
                //     logger.warn(result.event, result);
            }

@@ -604,7 +898,20 @@ async function receiveStream(model: string, convId: string, stream: any) {

            }
        });
        // Feed the stream data into the SSE parser
        stream.on("data", buffer => {
            // Check whether the buffer ends with a complete UTF-8 character
            if (buffer.toString().indexOf('�') != -1) {
                // If incomplete, accumulate the buffer until the complete character arrives
                temp = Buffer.concat([temp, buffer]);
                return;
            }
            // Prepend any previously accumulated incomplete buffer
            if (temp.length > 0) {
                buffer = Buffer.concat([temp, buffer]);
                temp = Buffer.from('');
            }
            parser.feed(buffer.toString());
        });
        stream.once("error", err => reject(err));
        stream.once("close", () => resolve(data));
    });
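The buffering above guards against multi-byte UTF-8 characters being split across stream chunks: decoding a partial sequence yields the replacement character '�' (U+FFFD), which is what the indexOf check detects. A standalone sketch of the same idea using the Node.js Buffer API, with a hypothetical byte split:

// '你' is encoded as the three bytes e4 bd a0 in UTF-8.
const bytes = Buffer.from('你');            // <Buffer e4 bd a0>
const firstChunk = bytes.subarray(0, 2);    // arrives first, incomplete
const secondChunk = bytes.subarray(2);      // arrives later

// Decoding the partial chunk produces replacement-character output:
console.log(firstChunk.toString());                     // garbled, contains '\uFFFD'
console.log(firstChunk.toString().includes('\uFFFD'));  // true -> accumulate and wait

// Concatenating the raw bytes before decoding restores the character:
console.log(Buffer.concat([firstChunk, secondChunk]).toString()); // '你'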
@@ -625,8 +932,11 @@ function createTransStream(model: string, convId: string, stream: any, endCallba

    const created = util.unixTimestamp();
    // Create the transform stream
    const transStream = new PassThrough();
    let webSearchCount = 0;
    let searchFlag = false;
    let lengthExceed = false;
    let segmentId = '';
    const silentSearch = model.indexOf('silent') != -1;
    !transStream.closed && transStream.write(`data: ${JSON.stringify({
        id: convId,
        model,

@@ -634,6 +944,7 @@ function createTransStream(model: string, convId: string, stream: any, endCallba

        choices: [
            { index: 0, delta: { role: 'assistant', content: '' }, finish_reason: null }
        ],
        segment_id: '',
        created
    })}\n\n`);
    const parser = createParser(event => {

@@ -654,12 +965,21 @@ function createTransStream(model: string, convId: string, stream: any, endCallba

                    choices: [
                        { index: 0, delta: { content: (searchFlag ? '\n' : '') + chunk }, finish_reason: null }
                    ],
                    segment_id: segmentId,
                    created
                })}\n\n`;
                if (searchFlag)
                    searchFlag = false;
                !transStream.closed && transStream.write(data);
            }
            // Handle the request ID
            else if (result.event == 'req') {
                segmentId = result.id;
            }
            // Handle over-long output
            else if (result.event == 'length') {
                lengthExceed = true;
            }
            // Handle completion or error
            else if (result.event == 'all_done' || result.event == 'error') {
                const data = `data: ${JSON.stringify({

@@ -670,10 +990,11 @@ function createTransStream(model: string, convId: string, stream: any, endCallba

                        {
                            index: 0, delta: result.event == 'error' ? {
                                content: '\n[内容由于不合规被停止生成,我们换个话题吧]'
                            } : {}, finish_reason: lengthExceed ? 'length' : 'stop'
                        }
                    ],
                    usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
                    segment_id: segmentId,
                    created
                })}\n\n`;
                !transStream.closed && transStream.write(data);

@@ -684,6 +1005,7 @@ function createTransStream(model: string, convId: string, stream: any, endCallba

            else if (!silentSearch && result.event == 'search_plus' && result.msg && result.msg.type == 'get_res') {
                if (!searchFlag)
                    searchFlag = true;
                webSearchCount += 1;
                const data = `data: ${JSON.stringify({
                    id: convId,
                    model,

@@ -691,10 +1013,11 @@ function createTransStream(model: string, convId: string, stream: any, endCallba

                    choices: [
                        {
                            index: 0, delta: {
                                content: `【检索 ${webSearchCount}】 [${result.msg.title}](${result.msg.url})\n`
                            }, finish_reason: null
                        }
                    ],
                    segment_id: segmentId,
                    created
                })}\n\n`;
                !transStream.closed && transStream.write(data);
@@ -723,9 +1046,35 @@ function tokenSplit(authorization: string) {

    return authorization.replace('Bearer ', '').split(',');
}
/**
 * Token liveness status
*/
async function getTokenLiveStatus(refreshToken: string) {
const result = await axios.get('https://kimi.moonshot.cn/api/auth/token/refresh', {
headers: {
Authorization: `Bearer ${refreshToken}`,
Referer: 'https://kimi.moonshot.cn/',
...FAKE_HEADERS
},
timeout: 15000,
validateStatus: () => true
});
try {
const {
access_token,
refresh_token
} = checkResult(result, refreshToken);
return !!(access_token && refresh_token)
}
catch (err) {
return false;
}
}
export default {
    createConversation,
    createCompletion,
    createCompletionStream,
    getTokenLiveStatus,
    tokenSplit
};


@@ -0,0 +1,20 @@
export default interface IStreamMessage {
id: string;
model: string;
object: string;
choices: {
index: number;
message: {
role: string;
content: string;
};
finish_reason: string;
}[];
usage: {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
};
segment_id?: string;
created: number;
}
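For reference, the data object assembled in receiveStream above conforms to this interface. A minimal hand-written value, with placeholder content and an OpenAI-style object tag (both assumptions, not taken from this diff):

// Assumes: import IStreamMessage from the interface file above.
const example: IStreamMessage = {
    id: 'cn0abcdefghijklmnopq',          // placeholder conversation id
    model: 'kimi',
    object: 'chat.completion',           // assumed OpenAI-style tag
    choices: [
        { index: 0, message: { role: 'assistant', content: '你好!' }, finish_reason: 'stop' }
    ],
    usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
    segment_id: '',
    created: Math.floor(Date.now() / 1000)
};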


@@ -13,22 +13,26 @@ export default {

        '/completions': async (request: Request) => {
            request
                .validate('body.conversation_id', v => _.isUndefined(v) || _.isString(v))
                .validate('body.messages', _.isArray)
                .validate('headers.authorization', _.isString)
            // Split the refresh_token list
            const tokens = chat.tokenSplit(request.headers.authorization);
            // Pick one refresh_token at random
            const token = _.sample(tokens);
            let { model, conversation_id: convId, messages, stream, use_search } = request.body;
            if (use_search)
                model = 'kimi-search';
            if (stream) {
                const stream = await chat.createCompletionStream(model, messages, token, convId);
                return new Response(stream, {
                    type: "text/event-stream"
                });
            }
            else
                return await chat.createCompletion(model, messages, token, convId);
        }
    }
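A usage sketch of the completions route above from a client's point of view. The base URL http://127.0.0.1:8000 and the environment variable name are assumptions, not taken from this diff; the Bearer token is a kimi refresh_token.

// Illustrative client call; host, port and token are placeholders.
const response = await fetch('http://127.0.0.1:8000/v1/chat/completions', {
    method: 'POST',
    headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${process.env.KIMI_REFRESH_TOKEN}`
    },
    body: JSON.stringify({
        model: 'kimi',
        // conversation_id is optional; when it looks like a 20-character conversation id,
        // the controller reuses it instead of creating (and later removing) a new one.
        // use_search: true switches the model to 'kimi-search', as shown above.
        use_search: true,
        stream: false,
        messages: [
            { role: 'user', content: '你好' }
        ]
    })
});
console.log(await response.json());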


@@ -1,7 +1,27 @@
import fs from 'fs-extra';

import Response from '@/lib/response/Response.ts';
import chat from "./chat.ts";
import ping from "./ping.ts";
import token from './token.ts';
import models from './models.ts';

export default [
    {
        get: {
            '/': async () => {
                const content = await fs.readFile('public/welcome.html');
                return new Response(content, {
                    type: 'html',
                    headers: {
                        Expires: '-1'
                    }
                });
            }
        }
    },
    chat,
    ping,
    token,
    models
];

src/api/routes/models.ts (new file, 41 lines)

@@ -0,0 +1,41 @@
import _ from 'lodash';
export default {
prefix: '/v1',
get: {
'/models': async () => {
return {
"data": [
{
"id": "moonshot-v1",
"object": "model",
"owned_by": "kimi-free-api"
},
{
"id": "moonshot-v1-8k",
"object": "model",
"owned_by": "kimi-free-api"
},
{
"id": "moonshot-v1-32k",
"object": "model",
"owned_by": "kimi-free-api"
},
{
"id": "moonshot-v1-128k",
"object": "model",
"owned_by": "kimi-free-api"
},
{
"id": "moonshot-v1-vision",
"object": "model",
"owned_by": "kimi-free-api"
}
]
};
}
}
}
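A quick sketch of consuming the model list above; the endpoint simply returns the static list defined in this file, and the base URL is an assumption about the deployment.

// Illustrative only; adjust the host/port to your deployment.
const res = await fetch('http://127.0.0.1:8000/v1/models');
const { data } = await res.json();
console.log(data.map((m: { id: string }) => m.id));
// Expected ids: moonshot-v1, moonshot-v1-8k, moonshot-v1-32k, moonshot-v1-128k, moonshot-v1-vision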

src/api/routes/token.ts (new file, 25 lines)

@@ -0,0 +1,25 @@
import _ from 'lodash';
import Request from '@/lib/request/Request.ts';
import Response from '@/lib/response/Response.ts';
import chat from '@/api/controllers/chat.ts';
import logger from '@/lib/logger.ts';
export default {
prefix: '/token',
post: {
'/check': async (request: Request) => {
request
.validate('body.token', _.isString)
const live = await chat.getTokenLiveStatus(request.body.token);
return {
live
}
}
}
}
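A usage sketch for the /token/check route above, assuming the same local deployment; it reports whether a refresh_token can still be exchanged for an access_token via getTokenLiveStatus.

// Illustrative only; the token value is a placeholder.
const res = await fetch('http://127.0.0.1:8000/token/check', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ token: 'your_kimi_refresh_token' })
});
// The body is expected to report live: true/false (possibly wrapped by the framework's response body).
console.log(await res.json());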


@@ -9,13 +9,15 @@ import { format as dateFormat } from 'date-fns';

import config from './config.ts';
import util from './util.ts';

const isVercelEnv = process.env.VERCEL;

class LogWriter {

    #buffers = [];

    constructor() {
        !isVercelEnv && fs.ensureDirSync(config.system.logDirPath);
        !isVercelEnv && this.work();
    }

    push(content) {

@@ -24,16 +26,16 @@ class LogWriter {

    }

    writeSync(buffer) {
        !isVercelEnv && fs.appendFileSync(path.join(config.system.logDirPath, `/${util.getDateString()}.log`), buffer);
    }

    async write(buffer) {
        !isVercelEnv && await fs.appendFile(path.join(config.system.logDirPath, `/${util.getDateString()}.log`), buffer);
    }

    flush() {
        if(!this.#buffers.length) return;
        !isVercelEnv && fs.appendFileSync(path.join(config.system.logDirPath, `/${util.getDateString()}.log`), Buffer.concat(this.#buffers));
    }

    work() {

View File

@@ -15,7 +15,7 @@ export default class FailureBody extends Body {

        else if(error instanceof APIException || error instanceof Exception)
            ({ errcode, errmsg, data, httpStatusCode } = error);
        else if(_.isError(error))
            ({ errcode, errmsg, data, httpStatusCode } = new Exception(EX.SYSTEM_ERROR, error.message));
        super({
            code: errcode || -1,
            message: errmsg || 'Internal error',


@@ -73,7 +73,11 @@ class Server {

        this.app.use((ctx: any) => {
            const request = new Request(ctx);
            logger.debug(`-> ${ctx.request.method} ${ctx.request.url} request is not supported - ${request.remoteIP || "unknown"}`);
            // const failureBody = new FailureBody(new Exception(EX.SYSTEM_NOT_ROUTE_MATCHING, "Request is not supported"));
            // const response = new Response(failureBody);
            const message = `[请求有误]: 正确请求为 POST -> /v1/chat/completions,当前请求为 ${ctx.request.method} -> ${ctx.request.url} 请纠正`;
            logger.warn(message);
            const failureBody = new FailureBody(new Error(message));
            const response = new Response(failureBody);
            response.injectTo(ctx);
            if(config.system.requestLog)


@@ -50,13 +50,10 @@ const util = {

    generateCookie() {
        const timestamp = util.unixTimestamp();
        const items = [
            `Hm_lvt_358cae4815e85d48f7e8ab7f3680a74b=${timestamp - Math.round(Math.random() * 2592000)}`,
            `_ga=GA1.1.${util.generateRandomString({ length: 10, charset: 'numeric' })}.${timestamp - Math.round(Math.random() * 2592000)}`,
            `_ga_YXD8W70SZP=GS1.1.${timestamp - Math.round(Math.random() * 2592000)}.1.1.${timestamp - Math.round(Math.random() * 2592000)}.0.0.0`,
            `Hm_lpvt_358cae4815e85d48f7e8ab7f3680a74b=${timestamp - Math.round(Math.random() * 2592000)}`
        ];
        return items.join('; ');
    },

vercel.json (new file, 27 lines)

@@ -0,0 +1,27 @@
{
"builds": [
{
"src": "./dist/*.html",
"use": "@vercel/static"
},
{
"src": "./dist/index.js",
"use": "@vercel/node"
}
],
"routes": [
{
"src": "/",
"dest": "/dist/welcome.html"
},
{
"src": "/(.*)",
"dest": "/dist",
"headers": {
"Access-Control-Allow-Credentials": "true",
"Access-Control-Allow-Methods": "GET,OPTIONS,PATCH,DELETE,POST,PUT",
"Access-Control-Allow-Headers": "X-CSRF-Token, X-Requested-With, Accept, Accept-Version, Content-Length, Content-MD5, Content-Type, Date, X-Api-Version, Content-Type, Authorization"
}
}
]
}

yarn.lock (new file, 1555 lines)

File diff suppressed because it is too large.