Compare commits

..

104 Commits

Author SHA1 Message Date
Vinlic
2f12a5daef Release 0.0.35 2024-12-31 12:47:47 +08:00
Vinlic
2ce738b1fc 修复非预期模型名处理 2024-12-31 12:47:16 +08:00
Vinlic
719e3b682f 支持GLM-4-Plus以及Zero思考推理模型 2024-12-31 11:32:25 +08:00
Vinlic科技
57b042d187
Update README.md 2024-12-14 01:58:46 +08:00
Vinlic
05ecba5cc2 修复代码生成调用输出 2024-12-14 01:54:49 +08:00
Vinlic科技
a969acd6fb
Merge pull request #43 from Alex-Yanggg/master
Update the Doc
2024-12-12 13:48:16 +08:00
Alex
54805f2475
Update README_EN.md 2024-12-12 16:45:11 +11:00
Alex
b402f99960
Update README.md
更改目录层级,添加英文入口
2024-12-12 15:52:43 +11:00
Alex
c1f5e9ae78
Create README_EN.md 2024-12-12 15:51:19 +11:00
Vinlic科技
fe3f0784c8
Update README.md 2024-12-04 17:05:38 +08:00
Vinlic
3237198289 Release 0.0.32 2024-07-28 04:06:46 +08:00
Vinlic
f56e582ec6 支持视频生成接口 2024-07-28 04:05:50 +08:00
Vinlic
d53a39f45a Release 0.0.31 2024-06-12 11:32:58 +08:00
Vinlic
6519a575fa Merge branch 'master' of https://github.com/LLM-Red-Team/glm-free-api 2024-06-12 11:31:59 +08:00
Vinlic
72b3698757 修复DALL-E兼容图像接口 2024-06-12 11:31:45 +08:00
Vinlic科技
620f6202de
Update README.md 2024-05-15 13:36:57 +08:00
Vinlic科技
90a9a702ed
Update README.md 2024-05-11 11:42:05 +08:00
Vinlic
34282ad837 Release 0.0.30 2024-05-06 11:19:39 +08:00
Vinlic
53f3365872 优化非流式搜索结果展示 2024-05-06 11:19:13 +08:00
Vinlic
cb044beae0 Merge branch 'master' of https://github.com/LLM-Red-Team/glm-free-api 2024-05-06 09:46:39 +08:00
Vinlic
9b650fab98 Release 0.0.29 2024-05-06 09:46:18 +08:00
Vinlic
afc9c96d02 去除非流式请求中的来源标记 2024-05-06 09:46:03 +08:00
Vinlic科技
506de6f791
Update README.md 2024-05-04 17:06:27 +08:00
Vinlic
6f48e347eb Release 0.0.27 2024-04-29 09:42:35 +08:00
Vinlic
4a450b8848 支持首轮传none对话ID 2024-04-29 09:42:21 +08:00
Vinlic
756c89aaef 处理首轮传送文件时导致对话合并问题 2024-04-28 17:37:16 +08:00
Vinlic
a2770fea93 update README 2024-04-28 14:14:03 +08:00
Vinlic
14078ac54f 去除调试 2024-04-28 10:18:54 +08:00
Vinlic
a837ed2e65 Release 0.0.26 2024-04-28 10:18:33 +08:00
Vinlic
295e69e6cb 支持原生的多轮对话 2024-04-28 10:16:29 +08:00
Vinlic科技
a695921e73
Update README.md 2024-04-26 16:56:11 +08:00
Vinlic
50d07922c9 Release 0.0.25 2024-04-26 16:20:04 +08:00
Vinlic
600a42938c 修复多轮对话时文件传送角色错配,抹除临时文件路径减少多轮幻觉 2024-04-26 16:19:57 +08:00
Vinlic
c70a4a4102 Release 0.0.24 2024-04-25 09:39:28 +08:00
Vinlic
376bb55ce3 Merge branch 'master' of https://github.com/LLM-Red-Team/glm-free-api 2024-04-25 09:38:53 +08:00
Vinlic
64e43ffd2c 优化绘图接口 2024-04-25 09:38:35 +08:00
Vinlic科技
99bd36d92f
Merge pull request #16 from KPCOFGS/master
更新了README.md文件
2024-04-25 02:31:05 +08:00
Shi Sheng
2b4dc650f1
Update README.md 2024-04-24 14:29:15 -04:00
Shi Sheng
8b56bbf3a4
Update README.md 2024-04-24 12:46:40 -04:00
Vinlic科技
8d8986b4b2
Update README.md 2024-04-24 14:33:04 +08:00
Vinlic科技
eeb3594514
Merge pull request #15 from Yanyutin753/main
feat support /v1/models to be better use lobechat
2024-04-24 13:27:44 +08:00
Yanyutin753
36e89c0b2a feat support /v1/models to be better use lobechat 2024-04-24 13:17:43 +08:00
Vinlic科技
420a9e71c0
Update README.md 2024-04-22 16:49:16 +08:00
Vinlic科技
50083accf0
Update README.md 2024-04-17 01:10:32 +08:00
Vinlic科技
ddd9165c70
Update README.md 2024-04-16 00:25:57 +08:00
Vinlic
d9c04ffa25 update README 2024-04-12 13:20:21 +08:00
Vinlic
1191602ad8 Merge branch 'master' of https://github.com/LLM-Red-Team/glm-free-api 2024-04-12 13:19:32 +08:00
Vinlic
f9cb26326f update README 2024-04-12 13:19:19 +08:00
Vinlic科技
665fb5724b
Update README.md 2024-04-11 18:54:30 +08:00
Vinlic
5edf3c65a8 Release 0.0.22 2024-04-11 15:05:10 +08:00
Vinlic
a30fa1cbca npm切换到yarn,加快容器构建 2024-04-11 15:04:44 +08:00
Vinlic
32884af017 支持Render部署和Vercel部署方式 2024-04-11 15:03:43 +08:00
Vinlic
0e13700824 Release 0.0.21 2024-04-10 18:45:45 +08:00
Vinlic
f01006c4f1 增加refresh_token存活检测 2024-04-10 18:45:38 +08:00
Vinlic
83a8c00edd update Dockerfile 2024-04-10 18:40:59 +08:00
Vinlic
1a5cf591af Release 0.0.20 2024-04-10 11:06:41 +08:00
Vinlic
6528bc1be7 绘图接口支持从文本中提取md图像url 2024-04-10 11:06:30 +08:00
Vinlic
cbe12ebfbe Merge branch 'master' of https://github.com/LLM-Red-Team/glm-free-api 2024-04-09 10:37:05 +08:00
Vinlic
50903076ca 增加日志提醒错误请求地址 2024-04-09 10:36:24 +08:00
Vinlic
20cbbb4452 Release 0.0.19 2024-04-09 10:32:49 +08:00
Vinlic
0dd4fa0d07 响应为json时将错误输出 2024-04-09 10:32:07 +08:00
Vinlic
5511de7cd9 Release 0.0.18 2024-04-05 23:48:50 +08:00
Vinlic科技
869e71f6db
Merge pull request #10 from Yanyutin753/master
optimize code in messagesPrepare
2024-04-05 23:15:18 +08:00
Yanyutin753
1cd06921aa optimize code in messagesPrepare 2024-04-05 19:04:16 +08:00
Vinlic
79e2620279 Release 0.0.17 2024-04-02 23:59:23 +08:00
Vinlic
6bc76f3df7 优化合并效果 2024-04-02 23:57:43 +08:00
Vinlic科技
97bce5b5fd
Merge pull request #9 from Yanyutin753/good
fix 上下文上传文件聊天不连贯问题
2024-04-02 23:33:28 +08:00
Clivia
fed30fcd91
Merge branch 'LLM-Red-Team:master' into good 2024-04-02 23:24:44 +08:00
Yanyutin753
083014b899 fix 上下文上传文件聊天不连贯问题 2024-04-02 23:23:58 +08:00
Yanyutin753
f453c075e2 try fix 上下文上传文件聊天不连贯问题 2024-04-02 15:21:03 +08:00
Vinlic科技
26e1735795
Merge pull request #8 from Yanyutin753/good
fix logger info update files
2024-04-02 11:10:01 +08:00
Yanyutin753
1af80ca83a fix logger info update files 2024-04-02 11:04:23 +08:00
Vinlic
30bfa2aa88 update README 2024-03-31 03:49:02 +08:00
Vinlic
92a9ac08a2 Release 0.0.15 2024-03-27 22:09:24 +08:00
Vinlic
8f1951cfd3 优化提升AI绘图成功率 2024-03-27 22:08:55 +08:00
Vinlic
90a70d25f3 Release 0.0.14 2024-03-27 17:26:25 +08:00
Vinlic
fee72ca188 修复传model值时未进行智能体ID识别处理 2024-03-27 17:25:47 +08:00
Vinlic
e2cf522844 update README 2024-03-27 10:45:50 +08:00
Vinlic
fc3a40f0fe 支持指定绘图的智能体 2024-03-27 10:44:00 +08:00
Vinlic
1b1b3dd322 支持指定绘图的智能体 2024-03-27 10:43:28 +08:00
Vinlic
6226615f53 Merge branch 'master' of https://github.com/LLM-Red-Team/glm-free-api 2024-03-27 10:37:05 +08:00
Vinlic
08658e5c4f Release 0.0.12 2024-03-27 10:36:49 +08:00
Vinlic
ea49bd3023 支持兼容dalle3绘图调用接口 2024-03-27 10:36:36 +08:00
Vinlic
cea74b948b Merge branch 'master' of https://github.com/LLM-Red-Team/glm-free-api 2024-03-25 04:18:52 +08:00
Vinlic
f6278b26d8 update README 2024-03-25 04:18:14 +08:00
Vinlic
96a80f5f32 Release 0.0.11 2024-03-21 13:48:52 +08:00
Vinlic
1144841ed9 修复markdown代码输出 2024-03-21 13:48:45 +08:00
Vinlic
491bfcd84b 修复markdown代码输出 2024-03-21 13:25:53 +08:00
Vinlic
3af6958f1c Release 0.0.9 2024-03-21 13:17:48 +08:00
Vinlic
8b7ab6a252 修复调用tool之前文本导致后续内容混乱问题 2024-03-21 13:17:28 +08:00
Vinlic
0a9373ed94 update README 2024-03-21 12:19:25 +08:00
Vinlic
45ac2545f7 update README 2024-03-21 12:16:13 +08:00
Vinlic
3aa4341c86 Release 0.0.8 2024-03-21 11:52:43 +08:00
Vinlic
97ef58f554 增加支持ping 2024-03-21 11:52:31 +08:00
Vinlic
888455b56a 优化代码调用输出效果 2024-03-21 11:51:26 +08:00
Vinlic
5158dee32c update README 2024-03-21 00:55:44 +08:00
Vinlic
e835d93e30 Release 0.0.7 2024-03-21 00:39:31 +08:00
Vinlic
5eae87e697 支持代码调用过程和结果展示 2024-03-21 00:38:27 +08:00
Vinlic
c7ee175c18 修复流模式传输完毕时未成功删除会话历史的问题 2024-03-21 00:02:28 +08:00
Vinlic
b6cf5fcc16 Release 0.0.6 2024-03-19 01:45:42 +08:00
Vinlic
7a26dc381e 支持安全拦截提示、修复多轮下的错误 2024-03-19 01:45:04 +08:00
Vinlic
74689340dd update README 2024-03-17 18:20:09 +08:00
Vinlic
31c8d52c80 update README 2024-03-17 18:18:36 +08:00
Vinlic
cbb14b0198 图像解析支持 2024-03-17 18:18:11 +08:00
33 changed files with 4791 additions and 545 deletions

48
.github/workflows/sync.yml vendored Normal file
View File

@ -0,0 +1,48 @@
name: Upstream Sync
permissions:
contents: write
issues: write
actions: write
on:
schedule:
- cron: '0 * * * *' # every hour
workflow_dispatch:
jobs:
sync_latest_from_upstream:
name: Sync latest commits from upstream repo
runs-on: ubuntu-latest
if: ${{ github.event.repository.fork }}
steps:
- uses: actions/checkout@v4
- name: Clean issue notice
uses: actions-cool/issues-helper@v3
with:
actions: 'close-issues'
labels: '🚨 Sync Fail'
- name: Sync upstream changes
id: sync
uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
with:
upstream_sync_repo: LLM-Red-Team/glm-free-api
upstream_sync_branch: master
target_sync_branch: master
target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set
test_mode: false
- name: Sync check
if: failure()
uses: actions-cool/issues-helper@v3
with:
actions: 'create-issue'
title: '🚨 同步失败 | Sync Fail'
labels: '🚨 Sync Fail'
body: |
Due to a change in the workflow file of the LLM-Red-Team/glm-free-api upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. Please refer to the detailed [Tutorial][tutorial-en-US] for instructions.
由于 LLM-Red-Team/glm-free-api 上游仓库的 workflow 文件变更,导致 GitHub 自动暂停了本次自动更新,你需要手动 Sync Fork 一次,

3
.gitignore vendored
View File

@ -1,3 +1,4 @@
dist/ dist/
node_modules/ node_modules/
logs/ logs/
.vercel

View File

@ -4,14 +4,15 @@ WORKDIR /app
COPY . /app COPY . /app
RUN npm i --registry http://registry.npmmirror.com && npm run build RUN yarn install --registry https://registry.npmmirror.com/ --ignore-engines && yarn run build
FROM node:lts-alpine FROM node:lts-alpine
COPY --from=BUILD_IMAGE /app/configs ./configs COPY --from=BUILD_IMAGE /app/configs /app/configs
COPY --from=BUILD_IMAGE /app/package.json ./package.json COPY --from=BUILD_IMAGE /app/package.json /app/package.json
COPY --from=BUILD_IMAGE /app/dist ./dist COPY --from=BUILD_IMAGE /app/dist /app/dist
COPY --from=BUILD_IMAGE /app/node_modules ./node_modules COPY --from=BUILD_IMAGE /app/public /app/public
COPY --from=BUILD_IMAGE /app/node_modules /app/node_modules
WORKDIR /app WORKDIR /app

587
README.md
View File

@ -1,19 +1,594 @@
# GLM AI Free 服务 # GLM AI Free 服务
![](https://img.shields.io/github/license/llm-red-team/glm-free-api.svg) <hr>
<span>[ 中文 | <a href="README_EN.md">English</a> ]</span>
[![](https://img.shields.io/github/license/llm-red-team/glm-free-api.svg)](LICENSE)
![](https://img.shields.io/github/stars/llm-red-team/glm-free-api.svg) ![](https://img.shields.io/github/stars/llm-red-team/glm-free-api.svg)
![](https://img.shields.io/github/forks/llm-red-team/glm-free-api.svg) ![](https://img.shields.io/github/forks/llm-red-team/glm-free-api.svg)
![](https://img.shields.io/docker/pulls/vinlic/glm-free-api.svg) ![](https://img.shields.io/docker/pulls/vinlic/glm-free-api.svg)
支持高速流式输出、支持多轮对话、支持智能体对话、支持AI绘图、支持联网搜索、支持长文档解读、支持图像解析零配置部署多路token支持自动清理会话痕迹。 支持GLM-4-Plus高速流式输出、支持多轮对话、支持智能体对话、支持Zero思考推理模型、支持视频生成、支持AI绘图、支持联网搜索、支持长文档解读、支持图像解析零配置部署多路token支持自动清理会话痕迹。
与ChatGPT接口完全兼容。 与ChatGPT接口完全兼容。
## 声明 还有以下十个free-api欢迎关注
仅限自用,禁止对外提供服务或商用,避免对官方造成服务压力,否则风险自担! Moonshot AIKimi.ai接口转API [kimi-free-api](https://github.com/LLM-Red-Team/kimi-free-api)
仅限自用,禁止对外提供服务或商用,避免对官方造成服务压力,否则风险自担! 阶跃星辰 (跃问StepChat) 接口转API [step-free-api](https://github.com/LLM-Red-Team/step-free-api)
仅限自用,禁止对外提供服务或商用,避免对官方造成服务压力,否则风险自担! 阿里通义 (Qwen) 接口转API [qwen-free-api](https://github.com/LLM-Red-Team/qwen-free-api)
秘塔AI (Metaso) 接口转API [metaso-free-api](https://github.com/LLM-Red-Team/metaso-free-api)
字节跳动豆包接口转API [doubao-free-api](https://github.com/LLM-Red-Team/doubao-free-api)
字节跳动即梦AI接口转API [jimeng-free-api](https://github.com/LLM-Red-Team/jimeng-free-api)
讯飞星火Spark接口转API [spark-free-api](https://github.com/LLM-Red-Team/spark-free-api)
MiniMax海螺AI接口转API [hailuo-free-api](https://github.com/LLM-Red-Team/hailuo-free-api)
深度求索DeepSeek接口转API [deepseek-free-api](https://github.com/LLM-Red-Team/deepseek-free-api)
聆心智能 (Emohaa) 接口转API [emohaa-free-api](https://github.com/LLM-Red-Team/emohaa-free-api)(当前不可用)
## 目录
* [免责声明](#免责声明)
* [效果示例](#效果示例)
* [接入准备](#接入准备)
* [智能体接入](#智能体接入)
* [多账号接入](#多账号接入)
* [Docker部署](#Docker部署)
* [Docker-compose部署](#Docker-compose部署)
* [Render部署](#Render部署)
* [Vercel部署](#Vercel部署)
* [原生部署](#原生部署)
* [推荐使用客户端](#推荐使用客户端)
* [接口列表](#接口列表)
* [对话补全](#对话补全)
* [视频生成](#视频生成)
* [AI绘图](#AI绘图)
* [文档解读](#文档解读)
* [图像解析](#图像解析)
* [refresh_token存活检测](#refresh_token存活检测)
* [注意事项](#注意事项)
* [Nginx反代优化](#Nginx反代优化)
* [Token统计](#Token统计)
* [Star History](#star-history)
## 免责声明
**逆向API是不稳定的建议前往智谱AI官方 https://open.bigmodel.cn/ 付费使用API避免封禁的风险。**
**本组织和个人不接受任何资金捐助和交易,此项目是纯粹研究交流学习性质!**
**仅限自用,禁止对外提供服务或商用,避免对官方造成服务压力,否则风险自担!**
**仅限自用,禁止对外提供服务或商用,避免对官方造成服务压力,否则风险自担!**
**仅限自用,禁止对外提供服务或商用,避免对官方造成服务压力,否则风险自担!**
## 效果示例
### 验明正身Demo
![验明正身](./doc/example-1.png)
### 智能体对话Demo
对应智能体链接:[网抑云评论生成器](https://chatglm.cn/main/gdetail/65c046a531d3fcb034918abe)
![智能体对话](./doc/example-9.png)
### 结合Dify工作流Demo
体验地址https://udify.app/chat/m46YgeVLNzFh4zRs
<img width="390" alt="image" src="https://github.com/LLM-Red-Team/glm-free-api/assets/20235341/4773b9f6-b1ca-460c-b3a7-c56bdb1f0659">
### 多轮对话Demo
![多轮对话](./doc/example-6.png)
### 视频生成Demo
[点击预览](https://sfile.chatglm.cn/testpath/video/c1f59468-32fa-58c3-bd9d-ab4230cfe3ca_0.mp4)
### AI绘图Demo
![AI绘图](./doc/example-10.png)
### 联网搜索Demo
![联网搜索](./doc/example-2.png)
### 长文档解读Demo
![长文档解读](./doc/example-5.png)
### 代码调用Demo
![代码调用](./doc/example-12.png)
### 图像解析Demo
![图像解析](./doc/example-3.png)
## 接入准备
从 [智谱清言](https://chatglm.cn/) 获取refresh_token
进入智谱清言随便发起一个对话然后F12打开开发者工具从Application > Cookies中找到`chatglm_refresh_token`的值这将作为Authorization的Bearer Token值`Authorization: Bearer TOKEN`
![example0](./doc/example-0.png)
### 智能体接入
打开智能体的聊天界面地址栏的一串ID就是智能体的ID复制下来备用这个值将用作调用时的 `model` 参数值。
![example11](./doc/example-11.png)
### 多账号接入
目前似乎限制同个账号同时只能有*一路*输出你可以通过提供多个账号的chatglm_refresh_token并使用`,`拼接提供:
`Authorization: Bearer TOKEN1,TOKEN2,TOKEN3`
每次请求服务会从中挑选一个。
## Docker部署
请准备一台具有公网IP的服务器并将8000端口开放。
拉取镜像并启动服务
```shell
docker run -it -d --init --name glm-free-api -p 8000:8000 -e TZ=Asia/Shanghai vinlic/glm-free-api:latest
```
查看服务实时日志
```shell
docker logs -f glm-free-api
```
重启服务
```shell
docker restart glm-free-api
```
停止服务
```shell
docker stop glm-free-api
```
### Docker-compose部署
```yaml
version: '3'
services:
glm-free-api:
container_name: glm-free-api
image: vinlic/glm-free-api:latest
restart: always
ports:
- "8000:8000"
environment:
- TZ=Asia/Shanghai
```
### Render部署
**注意部分部署区域可能无法连接glm如容器日志出现请求超时或无法连接请切换其他区域部署**
**注意免费账户的容器实例将在一段时间不活动时自动停止运行这会导致下次请求时遇到50秒或更长的延迟建议查看[Render容器保活](https://github.com/LLM-Red-Team/free-api-hub/#Render%E5%AE%B9%E5%99%A8%E4%BF%9D%E6%B4%BB)**
1. fork本项目到你的github账号下。
2. 访问 [Render](https://dashboard.render.com/) 并登录你的github账号。
3. 构建你的 Web ServiceNew+ -> Build and deploy from a Git repository -> Connect你fork的项目 -> 选择部署区域 -> 选择实例类型为Free -> Create Web Service
4. 等待构建完成后复制分配的域名并拼接URL访问即可。
### Vercel部署
**注意Vercel免费账户的请求响应超时时间为10秒但接口响应通常较久可能会遇到Vercel返回的504超时错误**
请先确保安装了Node.js环境。
```shell
npm i -g vercel --registry http://registry.npmmirror.com
vercel login
git clone https://github.com/LLM-Red-Team/glm-free-api
cd glm-free-api
vercel --prod
```
## 原生部署
请准备一台具有公网IP的服务器并将8000端口开放。
请先安装好Node.js环境并且配置好环境变量确认node命令可用。
安装依赖
```shell
npm i
```
安装PM2进行进程守护
```shell
npm i -g pm2
```
编译构建看到dist目录就是构建完成
```shell
npm run build
```
启动服务
```shell
pm2 start dist/index.js --name "glm-free-api"
```
查看服务实时日志
```shell
pm2 logs glm-free-api
```
重启服务
```shell
pm2 reload glm-free-api
```
停止服务
```shell
pm2 stop glm-free-api
```
## 推荐使用客户端
使用以下二次开发客户端接入free-api系列项目更快更简单支持文档/图像上传!
由 [Clivia](https://github.com/Yanyutin753/lobe-chat) 二次开发的LobeChat [https://github.com/Yanyutin753/lobe-chat](https://github.com/Yanyutin753/lobe-chat)
由 [时光@](https://github.com/SuYxh) 二次开发的ChatGPT Web [https://github.com/SuYxh/chatgpt-web-sea](https://github.com/SuYxh/chatgpt-web-sea)
## 接口列表
目前支持与openai兼容的 `/v1/chat/completions` 接口可自行使用与openai或其他兼容的客户端接入接口或者使用 [dify](https://dify.ai/) 等线上服务接入使用。
### 对话补全
对话补全接口与openai的 [chat-completions-api](https://platform.openai.com/docs/guides/text-generation/chat-completions-api) 兼容。
**POST /v1/chat/completions**
header 需要设置 Authorization 头部:
```
Authorization: Bearer [refresh_token]
```
请求数据:
```json
{
// 默认模型glm-4-plus
// zero思考推理模型glm-4-zero / glm-4-think
// 如果使用智能体请填写智能体ID到此处
"model": "glm-4-plus",
// 目前多轮对话基于消息合并实现某些场景可能导致能力下降且受单轮最大token数限制
// 如果您想获得原生的多轮对话体验可以传入首轮消息获得的id来接续上下文
// "conversation_id": "65f6c28546bae1f0fbb532de",
"messages": [
{
"role": "user",
"content": "你叫什么?"
}
],
// 如果使用SSE流请设置为true默认false
"stream": false
}
```
响应数据:
```json
{
// 如果想获得原生多轮对话体验此id你可以传入到下一轮对话的conversation_id来接续上下文
"id": "65f6c28546bae1f0fbb532de",
"model": "glm-4",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "我叫智谱清言,是基于智谱 AI 公司于 2023 年训练的 ChatGLM 开发的。我的任务是针对用户的问题和要求提供适当的答复和支持。"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 2
},
"created": 1710152062
}
```
### 视频生成
视频生成接口
**如果您的账号未开通VIP可能会因排队导致生成耗时较久**
**POST /v1/videos/generations**
header 需要设置 Authorization 头部:
```
Authorization: Bearer [refresh_token]
```
请求数据:
```json
{
// 模型名称
// cogvideox默认官方视频模型
// cogvideox-pro先生成图像再作为参考图像生成视频作为视频首帧引导视频效果但耗时更长
"model": "cogvideox",
// 视频生成提示词
"prompt": "一只可爱的猫走在花丛中",
// 支持使用图像URL或者BASE64_URL作为视频首帧参考图像如果使用cogvideox-pro则会忽略此参数
// "image_url": "https://sfile.chatglm.cn/testpath/b5341945-3839-522c-b4ab-a6268cb131d5_0.png",
// 支持设置视频风格卡通3D/黑白老照片/油画/电影感
// "video_style": "油画",
// 支持设置情感氛围:温馨和谐/生动活泼/紧张刺激/凄凉寂寞
// "emotional_atmosphere": "生动活泼",
// 支持设置运镜方式:水平/垂直/推近/拉远
// "mirror_mode": "水平"
}
```
响应数据:
```json
{
"created": 1722103836,
"data": [
{
// 对话ID目前没啥用
"conversation_id": "66a537ec0603e53bccb8900a",
// 封面URL
"cover_url": "https://sfile.chatglm.cn/testpath/video_cover/c1f59468-32fa-58c3-bd9d-ab4230cfe3ca_cover_0.png",
// 视频URL
"video_url": "https://sfile.chatglm.cn/testpath/video/c1f59468-32fa-58c3-bd9d-ab4230cfe3ca_0.mp4",
// 视频时长
"video_duration": "6s",
// 视频分辨率
"resolution": "1440×960"
}
]
}
```
### AI绘图
图像生成接口与openai的 [images-create-api](https://platform.openai.com/docs/api-reference/images/create) 兼容。
**POST /v1/images/generations**
header 需要设置 Authorization 头部:
```
Authorization: Bearer [refresh_token]
```
请求数据:
```json
{
// 如果使用智能体请填写智能体ID到此处否则可以乱填
"model": "cogview-3",
"prompt": "一只可爱的猫"
}
```
响应数据:
```json
{
"created": 1711507449,
"data": [
{
"url": "https://sfile.chatglm.cn/testpath/5e56234b-34ae-593c-ba4e-3f7ba77b5768_0.png"
}
]
}
```
### 文档解读
提供一个可访问的文件URL或者BASE64_URL进行解析。
**POST /v1/chat/completions**
header 需要设置 Authorization 头部:
```
Authorization: Bearer [refresh_token]
```
请求数据:
```json
{
// 如果使用智能体请填写智能体ID到此处否则可以乱填
"model": "glm-4",
"messages": [
{
"role": "user",
"content": [
{
"type": "file",
"file_url": {
"url": "https://mj101-1317487292.cos.ap-shanghai.myqcloud.com/ai/test.pdf"
}
},
{
"type": "text",
"text": "文档里说了什么?"
}
]
}
],
// 如果使用SSE流请设置为true默认false
"stream": false
}
```
响应数据:
```json
{
"id": "cnmuo7mcp7f9hjcmihn0",
"model": "glm-4",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "根据文档内容,我总结如下:\n\n这是一份关于希腊罗马时期的魔法咒语和仪式的文本包含几个魔法仪式\n\n1. 一个涉及面包、仪式场所和特定咒语的仪式,用于使某人爱上你。\n\n2. 一个针对女神赫卡忒的召唤仪式,用来折磨某人直到她自愿来到你身边。\n\n3. 一个通过念诵爱神阿芙罗狄蒂的秘密名字,连续七天进行仪式,来赢得一个美丽女子的心。\n\n4. 一个通过燃烧没药并念诵咒语,让一个女子对你产生强烈欲望的仪式。\n\n这些仪式都带有魔法和迷信色彩使用各种咒语和象征性行为来影响人的感情和意愿。"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 2
},
"created": 100920
}
```
### 图像解析
提供一个可访问的图像URL或者BASE64_URL进行解析。
此格式兼容 [gpt-4-vision-preview](https://platform.openai.com/docs/guides/vision) API格式您也可以用这个格式传送文档进行解析。
**POST /v1/chat/completions**
header 需要设置 Authorization 头部:
```
Authorization: Bearer [refresh_token]
```
请求数据:
```json
{
"model": "65c046a531d3fcb034918abe",
"messages": [
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "http://1255881664.vod2.myqcloud.com/6a0cd388vodbj1255881664/7b97ce1d3270835009240537095/uSfDwh6ZpB0A.png"
}
},
{
"type": "text",
"text": "图像描述了什么?"
}
]
}
],
"stream": false
}
```
响应数据:
```json
{
"id": "65f6c28546bae1f0fbb532de",
"model": "glm",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "图片中展示的是一个蓝色背景下的logo具体地左边是一个由多个蓝色的圆点组成的圆形图案右边是“智谱·AI”四个字字体颜色为蓝色。"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 2
},
"created": 1710670469
}
```
### refresh_token存活检测
检测refresh_token是否存活如果存活live为true否则为false请不要频繁小于10分钟调用此接口。
**POST /token/check**
请求数据:
```json
{
"token": "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9..."
}
```
响应数据:
```json
{
"live": true
}
```
## 注意事项
### Nginx反代优化
如果您正在使用Nginx反向代理glm-free-api请添加以下配置项优化流的输出效果优化体验感。
```nginx
# 关闭代理缓冲。当设置为off时Nginx会立即将客户端请求发送到后端服务器并立即将从后端服务器接收到的响应发送回客户端。
proxy_buffering off;
# 启用分块传输编码。分块传输编码允许服务器为动态生成的内容分块发送数据,而不需要预先知道内容的大小。
chunked_transfer_encoding on;
# 开启TCP_NOPUSH这告诉Nginx在数据包发送到客户端之前尽可能地发送数据。这通常在sendfile使用时配合使用可以提高网络效率。
tcp_nopush on;
# 开启TCP_NODELAY这告诉Nginx不延迟发送数据立即发送小数据包。在某些情况下这可以减少网络的延迟。
tcp_nodelay on;
# 设置保持连接的超时时间这里设置为120秒。如果在这段时间内客户端和服务器之间没有进一步的通信连接将被关闭。
keepalive_timeout 120;
```
### Token统计
由于推理侧不在glm-free-api因此token不可统计将以固定数字返回。
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=LLM-Red-Team/glm-free-api&type=Date)](https://star-history.com/#LLM-Red-Team/glm-free-api&Date)

596
README_EN.md Normal file
View File

@ -0,0 +1,596 @@
# GLM AI Free Service
[![](https://img.shields.io/github/license/llm-red-team/glm-free-api.svg)](LICENSE)
![](https://img.shields.io/github/stars/llm-red-team/glm-free-api.svg)
![](https://img.shields.io/github/forks/llm-red-team/glm-free-api.svg)
![](https://img.shields.io/docker/pulls/vinlic/glm-free-api.svg)
Supports high-speed streaming output, multi-turn dialogues, internet search, long document reading, image analysis, zero-configuration deployment, multi-token support, and automatic session trace cleanup.
Fully compatible with the ChatGPT interface.
Also, the following free APIs are available for your attention:
Moonshot AI (Kimi.ai) API to API [kimi-free-api](https://github.com/LLM-Red-Team/kimi-free-api/tree/master)
StepFun (StepChat) API to API [step-free-api](https://github.com/LLM-Red-Team/step-free-api)
Ali Tongyi (Qwen) API to API [qwen-free-api](https://github.com/LLM-Red-Team/qwen-free-api)
ZhipuAI (ChatGLM) API to API [glm-free-api](https://github.com/LLM-Red-Team/glm-free-api)
ByteDance (Doubao) API to API [doubao-free-api](https://github.com/LLM-Red-Team/doubao-free-api)
Meta Sota (metaso) API to API [metaso-free-api](https://github.com/LLM-Red-Team/metaso-free-api)
Iflytek Spark (Spark) API to API [spark-free-api](https://github.com/LLM-Red-Team/spark-free-api)
MiniMaxHailuoAPI to API [hailuo-free-api](https://github.com/LLM-Red-Team/hailuo-free-api)
DeepSeekDeepSeekAPI to API [deepseek-free-api](https://github.com/LLM-Red-Team/deepseek-free-api)
Lingxin Intelligence (Emohaa) API to API [emohaa-free-api](https://github.com/LLM-Red-Team/emohaa-free-api) (OUT OF ORDER)
## Table of Contents
* [Announcement](#Announcement)
* [Online Experience](#Online-Experience)
* [Effect Examples](#Effect-Examples)
* [Access Preparation](#Access-Preparation)
* [Agent Access](#Agent-Access)
* [Multiple Account Access](#Multiple-Account-Access)
* [Docker Deployment](#Docker-Deployment)
* [Docker-compose Deployment](#Docker-compose-Deployment)
* [Render Deployment](#Render-Deployment)
* [Vercel Deployment](#Vercel-Deployment)
* [Native Deployment](#Native-Deployment)
* [Recommended Clients](#Recommended-Clients)
* [Interface List](#Interface-List)
* [Conversation Completion](#Conversation-Completion)
* [Video Generation](#Video-Generation)
* [AI Drawing](#AI-Drawing)
* [Document Interpretation](#Document-Interpretation)
* [Image Analysis](#Image-Analysis)
* [Refresh_token Survival Detection](#Refresh_token-Survival-Detection)
* [Notification](#Notification)
* [Nginx Anti-generation Optimization](#Nginx-Anti-generation-Optimization)
* [Token Statistics](#Token-Statistics)
* [Star History](#star-history)
## Announcement
**This API is unstable. So we highly recommend you go to the [Zhipu](https://open.bigmodel.cn/) use the offical API, avoiding banned.**
**This organization and individuals do not accept any financial donations and transactions. This project is purely for research, communication, and learning purposes!**
**For personal use only, it is forbidden to provide services or commercial use externally to avoid causing service pressure on the official, otherwise, bear the risk yourself!**
**For personal use only, it is forbidden to provide services or commercial use externally to avoid causing service pressure on the official, otherwise, bear the risk yourself!**
**For personal use only, it is forbidden to provide services or commercial use externally to avoid causing service pressure on the official, otherwise, bear the risk yourself!**
## Online Experience
This link is only for temporary testing of functions and cannot be used for a long time. For long-term use, please deploy by yourself.
https://udify.app/chat/Pe89TtaX3rKXM8NS
## Effect Examples
### Identity Verification
![Identity Verification](./doc/example-1.png)
### AI-Agent
Agent link[Comments Generator](https://chatglm.cn/main/gdetail/65c046a531d3fcb034918abe)
![AI-Agent](./doc/example-9.png)
### Combined with Dify workflow
Experience linkhttps://udify.app/chat/m46YgeVLNzFh4zRs
<img width="390" alt="image" src="https://github.com/LLM-Red-Team/glm-free-api/assets/20235341/4773b9f6-b1ca-460c-b3a7-c56bdb1f0659">
### Multi-turn Dialogue
![Multi-turn Dialogue](./doc/example-6.png)
### Video Generation
[View](https://sfile.chatglm.cn/testpath/video/c1f59468-32fa-58c3-bd9d-ab4230cfe3ca_0.mp4)
### AI Drawing
![AI Drawing](./doc/example-10.png)
### Internet Search
![Internet Search](./doc/example-2.png)
### Long Document Reading
![Long Document Reading](./doc/example-5.png)
### Using Code
![Using Code](./doc/example-12.png)
### Image Analysis
![Image Analysis](./doc/example-3.png)
## Access Preparation
Obtain `refresh_token` from [Zhipu](https://chatglm.cn/)
Enter Zhipu Qingyan and start a random conversation, then press F12 to open the developer tools. Find the value of `chatglm_refresh_token` in Application > Cookies, which will be used as the Bearer Token value for Authorization: `Authorization: Bearer TOKEN`
![example0](./doc/example-0.png)
### Agent Access
Open a window of Agent Chat, the ID in the url is the ID of the Agent, which is the parameter of `model`.
![example11](./doc/example-11.png)
### Multiple Account Access
You can provide multiple account chatglm_refresh_tokens and use `,` to join them:
`Authorization: Bearer TOKEN1,TOKEN2,TOKEN3`
The service will pick one each time a request is made.
## Docker Deployment
Please prepare a server with a public IP and open port 8000.
Pull the image and start the service
```shell
docker run -it -d --init --name glm-free-api -p 8000:8000 -e TZ=Asia/Shanghai vinlic/glm-free-api:latest
```
check real-time service logs
```shell
docker logs -f glm-free-api
```
Restart service
```shell
docker restart glm-free-api
```
Shut down service
```shell
docker stop glm-free-api
```
### Docker-compose Deployment
```yaml
version: '3'
services:
glm-free-api:
container_name: glm-free-api
image: vinlic/glm-free-api:latest
restart: always
ports:
- "8000:8000"
environment:
- TZ=Asia/Shanghai
```
### Render Deployment
**Attention: Some deployment regions may not be able to connect to GLM. If container logs show request timeouts or connection failures (Singapore has been tested and found unavailable), please switch to another deployment region!**
**Attention: Container instances for free accounts will automatically stop after a period of inactivity, which may result in a 50-second or longer delay during the next request. It is recommended to check [Render Container Keepalive](https://github.com/LLM-Red-Team/free-api-hub/#Render%E5%AE%B9%E5%99%A8%E4%BF%9D%E6%B4%BB)**
1. Fork this project to your GitHub account.
2. Visit [Render](https://dashboard.render.com/) and log in with your GitHub account.
3. Build your Web Service (`New+` -> `Build and deploy from a Git repository` -> `Connect your forked project` -> `Select deployment region` -> `Choose instance type as Free` -> `Create Web Service`).
4. After the build is complete, copy the assigned domain and append the URL to access it.
### Vercel Deployment
**Note: Vercel free accounts have a request response timeout of 10 seconds, but interface responses are usually longer, which may result in a 504 timeout error from Vercel!**
Please ensure that Node.js environment is installed first.
```shell
npm i -g vercel --registry http://registry.npmmirror.com
vercel login
git clone https://github.com/LLM-Red-Team/glm-free-api
cd glm-free-api
vercel --prod
```
## Native Deployment
Please prepare a server with a public IP and open port 8000.
Please install the Node.js environment and configure the environment variables first, and confirm that the node command is available.
Install dependencies
```shell
npm i
```
Install PM2 for process guarding
```shell
npm i -g pm2
```
Compile and build. When you see the dist directory, the build is complete.
```shell
npm run build
```
Start service
```shell
pm2 start dist/index.js --name "glm-free-api"
```
View real-time service logs
```shell
pm2 logs glm-free-api
```
Restart service
```shell
pm2 reload glm-free-api
```
Shut down service
```shell
pm2 stop glm-free-api
```
## Recommended Clients
Using the following second-developed clients for free-api series projects is faster and easier, and supports document/image uploads!
[Clivia](https://github.com/Yanyutin753/lobe-chat)'s modified LobeChat [https://github.com/Yanyutin753/lobe-chat](https://github.com/Yanyutin753/lobe-chat)
[Time@](https://github.com/SuYxh)'s modified ChatGPT Web [https://github.com/SuYxh/chatgpt-web-sea](https://github.com/SuYxh/chatgpt-web-sea)
## Interface List
Currently, the `/v1/chat/completions` interface compatible with openai is supported. You can use the client access interface compatible with openai or other clients, or use online services such as [dify](https://dify.ai/) Access and use.
### Conversation Completion
Conversation completion interface, compatible with openai's [chat-completions-api](https://platform.openai.com/docs/guides/text-generation/chat-completions-api).
**POST /v1/chat/completions**
The header needs to set the Authorization header:
```
Authorization: Bearer [refresh_token]
```
Request data:
```json
{
// Default model: glm-4-plus
// zero thinking model: glm-4-zero / glm-4-think
// If using the Agent, fill in the Agent ID here
"model": "glm-4",
// Currently, multi-round conversations are realized based on message merging, which in some scenarios may lead to capacity degradation and is limited by the maximum number of tokens in a single round.
// If you want a native multi-round dialog experience, you can pass in the ids obtained from the last round of messages to pick up the context
// "conversation_id": "65f6c28546bae1f0fbb532de",
"messages": [
{
"role": "user",
"content": "Who RU"
}
],
// If using SSE stream, please set it to true, the default is false
"stream": false
}
```
Response data
```json
{
"id": "65f6c28546bae1f0fbb532de",
"model": "glm-4",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "My name is Zhipu Qingyan."
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 2
},
"created": 1710152062
}
```
### Video Generation
Video API
**If you're not VIP, you will wait in line for a long time.**
**POST /v1/videos/generations**
The header needs to set the Authorization header:
```
Authorization: Bearer [refresh_token]
```
Request data:
```json
{
// 模型名称
// cogvideox默认官方视频模型
// cogvideox-pro先生成图像再作为参考图像生成视频作为视频首帧引导视频效果但耗时更长
"model": "cogvideox",
// 视频生成提示词
"prompt": "一只可爱的猫走在花丛中",
// 支持使用图像URL或者BASE64_URL作为视频首帧参考图像如果使用cogvideox-pro则会忽略此参数
// "image_url": "https://sfile.chatglm.cn/testpath/b5341945-3839-522c-b4ab-a6268cb131d5_0.png",
// 支持设置视频风格卡通3D/黑白老照片/油画/电影感
// "video_style": "油画",
// 支持设置情感氛围:温馨和谐/生动活泼/紧张刺激/凄凉寂寞
// "emotional_atmosphere": "生动活泼",
// 支持设置运镜方式:水平/垂直/推近/拉远
// "mirror_mode": "水平"
}
```
Response data:
```json
{
"created": 1722103836,
"data": [
{
// 对话ID目前没啥用
"conversation_id": "66a537ec0603e53bccb8900a",
// 封面URL
"cover_url": "https://sfile.chatglm.cn/testpath/video_cover/c1f59468-32fa-58c3-bd9d-ab4230cfe3ca_cover_0.png",
// 视频URL
"video_url": "https://sfile.chatglm.cn/testpath/video/c1f59468-32fa-58c3-bd9d-ab4230cfe3ca_0.mp4",
// 视频时长
"video_duration": "6s",
// 视频分辨率
"resolution": "1440×960"
}
]
}
```
### AI Drawing
This format is compatible with the [OpenAI images API (DALL·E)](https://platform.openai.com/docs/api-reference/images) format.
**POST /v1/images/generations**
The header needs to set the Authorization header:
```
Authorization: Bearer [refresh_token]
```
Request data:
```json
{
// 如果使用智能体请填写智能体ID到此处否则可以乱填
"model": "cogview-3",
"prompt": "A cute cat"
}
```
Response data:
```json
{
"created": 1711507449,
"data": [
{
"url": "https://sfile.chatglm.cn/testpath/5e56234b-34ae-593c-ba4e-3f7ba77b5768_0.png"
}
]
}
```
### Document Interpretation
Provide an accessible file URL or BASE64_URL to parse.
**POST /v1/chat/completions**
The header needs to set the Authorization header:
```
Authorization: Bearer [refresh_token]
```
Request data:
```json
{
// 如果使用智能体请填写智能体ID到此处否则可以乱填
"model": "glm-4",
"messages": [
{
"role": "user",
"content": [
{
"type": "file",
"file_url": {
"url": "https://mj101-1317487292.cos.ap-shanghai.myqcloud.com/ai/test.pdf"
}
},
{
"type": "text",
"text": "文档里说了什么?"
}
]
}
],
// 如果使用SSE流请设置为true默认false
"stream": false
}
```
Response data:
```json
{
"id": "cnmuo7mcp7f9hjcmihn0",
"model": "glm-4",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "根据文档内容,我总结如下:\n\n这是一份关于希腊罗马时期的魔法咒语和仪式的文本包含几个魔法仪式\n\n1. 一个涉及面包、仪式场所和特定咒语的仪式,用于使某人爱上你。\n\n2. 一个针对女神赫卡忒的召唤仪式,用来折磨某人直到她自愿来到你身边。\n\n3. 一个通过念诵爱神阿芙罗狄蒂的秘密名字,连续七天进行仪式,来赢得一个美丽女子的心。\n\n4. 一个通过燃烧没药并念诵咒语,让一个女子对你产生强烈欲望的仪式。\n\n这些仪式都带有魔法和迷信色彩使用各种咒语和象征性行为来影响人的感情和意愿。"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 2
},
"created": 100920
}
```
### Image Analysis
Provide an accessible image URL or BASE64_URL to parse.
This format is compatible with the [gpt-4-vision-preview](https://platform.openai.com/docs/guides/vision) API format. You can also use this format to transmit documents for parsing.
**POST /v1/chat/completions**
The header needs to set the Authorization header:
```
Authorization: Bearer [refresh_token]
```
Request data:
```json
{
"model": "65c046a531d3fcb034918abe",
"messages": [
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "http://1255881664.vod2.myqcloud.com/6a0cd388vodbj1255881664/7b97ce1d3270835009240537095/uSfDwh6ZpB0A.png"
}
},
{
"type": "text",
"text": "图像描述了什么?"
}
]
}
],
"stream": false
}
```
Response data:
```json
{
"id": "65f6c28546bae1f0fbb532de",
"model": "glm",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "图片中展示的是一个蓝色背景下的logo具体地左边是一个由多个蓝色的圆点组成的圆形图案右边是“智谱·AI”四个字字体颜色为蓝色。"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 2
},
"created": 1710670469
}
```
### Refresh_token Survival Detection
Checks whether a refresh_token is still alive: `live` is `true` if the token is usable, otherwise `false`. Please do not call this interface frequently (no more than once every 10 minutes).
**POST /token/check**
Request data:
```json
{
"token": "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9..."
}
```
Response data:
```json
{
"live": true
}
```
## Notification
### Nginx Reverse Proxy Optimization
If you are using Nginx reverse proxy `glm-free-api`, please add the following configuration items to optimize the output effect of the stream and optimize the experience.
```nginx
# Turn off proxy buffering. When set to off, Nginx will immediately send client requests to the backend server and immediately send responses received from the backend server back to the client.
proxy_buffering off;
# Enable chunked transfer encoding. Chunked transfer encoding allows servers to send data in chunks for dynamically generated content without knowing the size of the content in advance.
chunked_transfer_encoding on;
# Turn on TCP_NOPUSH, which tells Nginx to send as much data as possible before sending the packet to the client. This is usually used in conjunction with sendfile to improve network efficiency.
tcp_nopush on;
# Turn on TCP_NODELAY, which tells Nginx not to delay sending data and to send small data packets immediately. In some cases, this can reduce network latency.
tcp_nodelay on;
# Set the keep-alive connection timeout, here 120 seconds. If there is no further communication between client and server during this time, the connection will be closed.
keepalive_timeout 120;
```
### Token Statistics
Since inference does not happen inside glm-free-api, token usage cannot be counted and is returned as a fixed placeholder number.
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=LLM-Red-Team/glm-free-api&type=Date)](https://star-history.com/#LLM-Red-Team/glm-free-api&Date)

BIN
doc/example-0.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 307 KiB

BIN
doc/example-1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

BIN
doc/example-10.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 873 KiB

BIN
doc/example-11.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 181 KiB

BIN
doc/example-12.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 89 KiB

BIN
doc/example-2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 209 KiB

BIN
doc/example-3.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 283 KiB

BIN
doc/example-5.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 334 KiB

BIN
doc/example-6.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 286 KiB

BIN
doc/example-9.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 141 KiB

View File

@ -1,6 +1,6 @@
{ {
"name": "glm-free-api", "name": "glm-free-api",
"version": "0.0.3", "version": "0.0.35",
"description": "GLM Free API Server", "description": "GLM Free API Server",
"type": "module", "type": "module",
"main": "dist/index.js", "main": "dist/index.js",
@ -13,8 +13,8 @@
"dist/" "dist/"
], ],
"scripts": { "scripts": {
"dev": "tsup src/index.ts --format cjs,esm --sourcemap --dts --publicDir public --watch --onSuccess \"node dist/index.js\"", "dev": "tsup src/index.ts --format cjs,esm --sourcemap --dts --publicDir public --watch --onSuccess \"node --enable-source-maps dist/index.js\"",
"start": "node dist/index.js", "start": "node --enable-source-maps dist/index.js",
"build": "tsup src/index.ts --format cjs,esm --sourcemap --dts --clean --publicDir public" "build": "tsup src/index.ts --format cjs,esm --sourcemap --dts --clean --publicDir public"
}, },
"author": "Vinlic", "author": "Vinlic",
@ -38,6 +38,7 @@
"mime": "^4.0.1", "mime": "^4.0.1",
"minimist": "^1.2.8", "minimist": "^1.2.8",
"randomstring": "^1.3.0", "randomstring": "^1.3.0",
"sharp": "^0.33.4",
"uuid": "^9.0.1", "uuid": "^9.0.1",
"yaml": "^2.3.4" "yaml": "^2.3.4"
}, },

10
public/welcome.html Normal file
View File

@ -0,0 +1,10 @@
<!DOCTYPE html>
<!-- Welcome page served at GET /: confirms glm-free-api is up and points users at supported clients. -->
<html>
<head>
<meta charset="utf-8"/>
<title>🚀 服务已启动</title>
</head>
<body>
<p>glm-free-api已启动<br>请通过LobeChat / NextChat / Dify等客户端或OpenAI SDK接入</p>
</body>
</html>

View File

@ -5,5 +5,8 @@ export default {
API_TOKEN_EXPIRES: [-2002, 'Token已失效'], API_TOKEN_EXPIRES: [-2002, 'Token已失效'],
API_FILE_URL_INVALID: [-2003, '远程文件URL非法'], API_FILE_URL_INVALID: [-2003, '远程文件URL非法'],
API_FILE_EXECEEDS_SIZE: [-2004, '远程文件超出大小'], API_FILE_EXECEEDS_SIZE: [-2004, '远程文件超出大小'],
API_CHAT_STREAM_PUSHING: [-2005, '已有对话流正在输出'] API_CHAT_STREAM_PUSHING: [-2005, '已有对话流正在输出'],
API_CONTENT_FILTERED: [-2006, '内容由于合规问题已被阻止生成'],
API_IMAGE_GENERATION_FAILED: [-2007, '图像生成失败'],
API_VIDEO_GENERATION_FAILED: [-2008, '视频生成失败'],
} }

File diff suppressed because it is too large Load Diff

View File

@ -5,6 +5,9 @@ import Response from '@/lib/response/Response.ts';
import chat from '@/api/controllers/chat.ts'; import chat from '@/api/controllers/chat.ts';
import logger from '@/lib/logger.ts'; import logger from '@/lib/logger.ts';
// zero推理模型智能体ID
const ZERO_ASSISTANT_ID = "676411c38945bbc58a905d31";
export default { export default {
prefix: '/v1/chat', prefix: '/v1/chat',
@ -13,22 +16,23 @@ export default {
'/completions': async (request: Request) => { '/completions': async (request: Request) => {
request request
.validate('body.conversation_id', v => _.isUndefined(v) || _.isString(v))
.validate('body.messages', _.isArray) .validate('body.messages', _.isArray)
.validate('headers.authorization', _.isString) .validate('headers.authorization', _.isString)
// refresh_token切分 // refresh_token切分
const tokens = chat.tokenSplit(request.headers.authorization); const tokens = chat.tokenSplit(request.headers.authorization);
// 随机挑选一个refresh_token // 随机挑选一个refresh_token
const token = _.sample(tokens); const token = _.sample(tokens);
const messages = request.body.messages; const { model, conversation_id: convId, messages, stream } = request.body;
const assistantId = /^[a-z0-9]{24,}$/.test(request.body.model) ? request.body.model : undefined
if (request.body.stream) { if (stream) {
const stream = await chat.createCompletionStream(request.body.messages, token, assistantId); const stream = await chat.createCompletionStream(messages, token, model, convId);
return new Response(stream, { return new Response(stream, {
type: "text/event-stream" type: "text/event-stream"
}); });
} }
else else
return await chat.createCompletion(messages, token, assistantId); return await chat.createCompletion(messages, token, model, convId);
} }
} }

39
src/api/routes/images.ts Normal file
View File

@ -0,0 +1,39 @@
import _ from "lodash";
import Request from "@/lib/request/Request.ts";
import chat from "@/api/controllers/chat.ts";
import util from "@/lib/util.ts";

/**
 * OpenAI-compatible image generation route.
 *
 * POST /v1/images/generations — generates images from a text prompt and
 * returns them either as URLs (default) or as base64 payloads when the
 * caller sets `response_format` to "b64_json".
 */
export default {
  prefix: "/v1/images",

  post: {
    "/generations": async (request: Request) => {
      request
        .validate("body.prompt", _.isString)
        .validate("headers.authorization", _.isString);
      // The Authorization header may carry several refresh_tokens; split them out.
      const tokens = chat.tokenSplit(request.headers.authorization);
      // Load-balance by picking one refresh_token at random.
      const token = _.sample(tokens);
      const prompt = request.body.prompt;
      const responseFormat = _.defaultTo(request.body.response_format, "url");
      // A model value of 24+ lowercase alphanumerics is treated as an agent id;
      // anything else means "no agent" and the default model is used upstream.
      const assistantId = /^[a-z0-9]{24,}$/.test(request.body.model)
        ? request.body.model
        : undefined;
      const imageUrls = await chat.generateImages(assistantId, prompt, token);
      const data =
        responseFormat == "b64_json"
          ? (
              await Promise.all(
                imageUrls.map((url) => util.fetchFileBASE64(url))
              )
            ).map((b64) => ({ b64_json: b64 }))
          : imageUrls.map((url) => ({ url }));
      return {
        created: util.unixTimestamp(),
        data,
      };
    },
  },
};

View File

@ -1,5 +1,31 @@
import fs from 'fs-extra';
import Response from '@/lib/response/Response.ts';
import chat from "./chat.ts"; import chat from "./chat.ts";
import images from "./images.ts";
import videos from './videos.ts';
import ping from "./ping.ts";
import token from './token.js';
import models from './models.ts';
export default [ export default [
chat {
get: {
'/': async () => {
const content = await fs.readFile('public/welcome.html');
return new Response(content, {
type: 'html',
headers: {
Expires: '-1'
}
});
}
}
},
chat,
images,
videos,
ping,
token,
models
]; ];

46
src/api/routes/models.ts Normal file
View File

@ -0,0 +1,46 @@
import _ from 'lodash';

// Model ids this deployment advertises; every entry is served by glm-free-api.
const MODEL_IDS = [
    "glm-3-turbo",
    "glm-4",
    "glm-4-plus",
    "glm-4v",
    "glm-v1",
    "glm-v1-vision"
];

/**
 * OpenAI-compatible model listing route.
 *
 * GET /v1/models — returns the fixed catalogue above in the
 * `{ data: [{ id, object, owned_by }] }` shape clients expect.
 */
export default {
    prefix: '/v1',
    get: {
        '/models': async () => ({
            data: MODEL_IDS.map((id) => ({
                id,
                object: "model",
                owned_by: "glm-free-api"
            }))
        })
    }
}

6
src/api/routes/ping.ts Normal file
View File

@ -0,0 +1,6 @@
// Response body for the liveness probe.
const PONG = "pong";

/**
 * Health-check route: GET /ping always answers with the literal "pong".
 */
export default {
    prefix: '/ping',
    get: {
        '': async () => PONG
    }
}

25
src/api/routes/token.ts Normal file
View File

@ -0,0 +1,25 @@
import _ from 'lodash';
import Request from '@/lib/request/Request.ts';
import Response from '@/lib/response/Response.ts';
import chat from '@/api/controllers/chat.ts';
import logger from '@/lib/logger.ts';

/**
 * Refresh_token liveness route.
 *
 * POST /token/check — probes whether the supplied refresh_token is still
 * usable and reports the result as `{ live: boolean }`.
 */
export default {
    prefix: '/token',
    post: {
        '/check': async (request: Request) => {
            request.validate('body.token', _.isString);
            const alive = await chat.getTokenLiveStatus(request.body.token);
            return { live: alive };
        }
    }
}

78
src/api/routes/videos.ts Normal file
View File

@ -0,0 +1,78 @@
import _ from "lodash";
import Request from "@/lib/request/Request.ts";
import chat from "@/api/controllers/chat.ts";
import util from "@/lib/util.ts";

// Validator: accepts undefined or a string.
const optionalString = (v) => _.isUndefined(v) || _.isString(v);
// Validator factory: accepts undefined or one of the listed choices.
const optionalOneOf = (choices: string[]) => (v) =>
  _.isUndefined(v) || choices.includes(v);

/**
 * Video generation route.
 *
 * POST /v1/videos/generations — validates the optional style/atmosphere/
 * camera parameters against their allowed Chinese-label values, then hands
 * the request to the chat controller and returns the generated videos.
 */
export default {
  prefix: "/v1/videos",

  post: {
    "/generations": async (request: Request) => {
      request
        .validate("body.conversation_id", optionalString)
        .validate("body.model", optionalString)
        .validate("body.prompt", _.isString)
        .validate("body.audio_id", optionalString)
        .validate("body.image_url", optionalString)
        .validate(
          "body.video_style",
          optionalOneOf(["卡通3D", "黑白老照片", "油画", "电影感"]),
          "video_style must be one of 卡通3D/黑白老照片/油画/电影感"
        )
        .validate(
          "body.emotional_atmosphere",
          optionalOneOf(["温馨和谐", "生动活泼", "紧张刺激", "凄凉寂寞"]),
          "emotional_atmosphere must be one of 温馨和谐/生动活泼/紧张刺激/凄凉寂寞"
        )
        .validate(
          "body.mirror_mode",
          optionalOneOf(["水平", "垂直", "推近", "拉远"]),
          "mirror_mode must be one of 水平/垂直/推近/拉远"
        )
        .validate("headers.authorization", _.isString);
      // The Authorization header may carry several refresh_tokens; split them out.
      const tokens = chat.tokenSplit(request.headers.authorization);
      // Load-balance by picking one refresh_token at random.
      const token = _.sample(tokens);
      const {
        model,
        conversation_id: convId,
        prompt,
        image_url: imageUrl,
        video_style: videoStyle = "",
        emotional_atmosphere: emotionalAtmosphere = "",
        mirror_mode: mirrorMode = "",
        audio_id: audioId,
      } = request.body;
      const data = await chat.generateVideos(
        model,
        prompt,
        token,
        { imageUrl, videoStyle, emotionalAtmosphere, mirrorMode, audioId },
        convId
      );
      return {
        created: util.unixTimestamp(),
        data,
      };
    },
  },
};

View File

@ -9,13 +9,15 @@ import { format as dateFormat } from 'date-fns';
import config from './config.ts'; import config from './config.ts';
import util from './util.ts'; import util from './util.ts';
const isVercelEnv = process.env.VERCEL;
class LogWriter { class LogWriter {
#buffers = []; #buffers = [];
constructor() { constructor() {
fs.ensureDirSync(config.system.logDirPath); !isVercelEnv && fs.ensureDirSync(config.system.logDirPath);
this.work(); !isVercelEnv && this.work();
} }
push(content) { push(content) {
@ -24,16 +26,16 @@ class LogWriter {
} }
writeSync(buffer) { writeSync(buffer) {
fs.appendFileSync(path.join(config.system.logDirPath, `/${util.getDateString()}.log`), buffer); !isVercelEnv && fs.appendFileSync(path.join(config.system.logDirPath, `/${util.getDateString()}.log`), buffer);
} }
async write(buffer) { async write(buffer) {
await fs.appendFile(path.join(config.system.logDirPath, `/${util.getDateString()}.log`), buffer); !isVercelEnv && await fs.appendFile(path.join(config.system.logDirPath, `/${util.getDateString()}.log`), buffer);
} }
flush() { flush() {
if(!this.#buffers.length) return; if(!this.#buffers.length) return;
fs.appendFileSync(path.join(config.system.logDirPath, `/${util.getDateString()}.log`), Buffer.concat(this.#buffers)); !isVercelEnv && fs.appendFileSync(path.join(config.system.logDirPath, `/${util.getDateString()}.log`), Buffer.concat(this.#buffers));
} }
work() { work() {

View File

@ -52,7 +52,7 @@ export default class Request {
this.time = Number(_.defaultTo(time, util.timestamp())); this.time = Number(_.defaultTo(time, util.timestamp()));
} }
validate(key: string, fn?: Function) { validate(key: string, fn?: Function, message?: string) {
try { try {
const value = _.get(this, key); const value = _.get(this, key);
if (fn) { if (fn) {
@ -64,7 +64,7 @@ export default class Request {
} }
catch (err) { catch (err) {
logger.warn(`Params ${key} invalid:`, err); logger.warn(`Params ${key} invalid:`, err);
throw new APIException(EX.API_REQUEST_PARAMS_INVALID, `Params ${key} invalid`); throw new APIException(EX.API_REQUEST_PARAMS_INVALID, message || `Params ${key} invalid`);
} }
return this; return this;
} }

View File

@ -15,7 +15,7 @@ export default class FailureBody extends Body {
else if(error instanceof APIException || error instanceof Exception) else if(error instanceof APIException || error instanceof Exception)
({ errcode, errmsg, data, httpStatusCode } = error); ({ errcode, errmsg, data, httpStatusCode } = error);
else if(_.isError(error)) else if(_.isError(error))
error = new Exception(EX.SYSTEM_ERROR, error.message); ({ errcode, errmsg, data, httpStatusCode } = new Exception(EX.SYSTEM_ERROR, error.message));
super({ super({
code: errcode || -1, code: errcode || -1,
message: errmsg || 'Internal error', message: errmsg || 'Internal error',

View File

@ -73,7 +73,11 @@ class Server {
this.app.use((ctx: any) => { this.app.use((ctx: any) => {
const request = new Request(ctx); const request = new Request(ctx);
logger.debug(`-> ${ctx.request.method} ${ctx.request.url} request is not supported - ${request.remoteIP || "unknown"}`); logger.debug(`-> ${ctx.request.method} ${ctx.request.url} request is not supported - ${request.remoteIP || "unknown"}`);
const failureBody = new FailureBody(new Exception(EX.SYSTEM_NOT_ROUTE_MATCHING, "Request is not supported")); // const failureBody = new FailureBody(new Exception(EX.SYSTEM_NOT_ROUTE_MATCHING, "Request is not supported"));
// const response = new Response(failureBody);
const message = `[请求有误]: 正确请求为 POST -> /v1/chat/completions当前请求为 ${ctx.request.method} -> ${ctx.request.url} 请纠正`;
logger.warn(message);
const failureBody = new FailureBody(new Error(message));
const response = new Response(failureBody); const response = new Response(failureBody);
response.injectTo(ctx); response.injectTo(ctx);
if(config.system.requestLog) if(config.system.requestLog)

View File

@ -1,258 +1,307 @@
import os from 'os'; import os from "os";
import path from 'path'; import path from "path";
import crypto from 'crypto'; import crypto from "crypto";
import { Readable, Writable } from 'stream'; import { Readable, Writable } from "stream";
import 'colors'; import "colors";
import mime from 'mime'; import mime from "mime";
import fs from 'fs-extra'; import axios from "axios";
import { v1 as uuid } from 'uuid'; import fs from "fs-extra";
import { format as dateFormat } from 'date-fns'; import { v1 as uuid } from "uuid";
import CRC32 from 'crc-32'; import { format as dateFormat } from "date-fns";
import randomstring from 'randomstring'; import CRC32 from "crc-32";
import _ from 'lodash'; import randomstring from "randomstring";
import { CronJob } from 'cron'; import _ from "lodash";
import { CronJob } from "cron";
import HTTP_STATUS_CODE from './http-status-codes.ts'; import HTTP_STATUS_CODE from "./http-status-codes.ts";
const autoIdMap = new Map(); const autoIdMap = new Map();
const util = { const util = {
is2DArrays(value: any) {
return (
_.isArray(value) &&
(!value[0] || (_.isArray(value[0]) && _.isArray(value[value.length - 1])))
);
},
is2DArrays(value: any) { uuid: (separator = true) => (separator ? uuid() : uuid().replace(/\-/g, "")),
return _.isArray(value) && (!value[0] || (_.isArray(value[0]) && _.isArray(value[value.length - 1])));
},
uuid: (separator = true) => separator ? uuid() : uuid().replace(/\-/g, ""), autoId: (prefix = "") => {
let index = autoIdMap.get(prefix);
if (index > 999999) index = 0; //超过最大数字则重置为0
autoIdMap.set(prefix, (index || 0) + 1);
return `${prefix}${index || 1}`;
},
autoId: (prefix = '') => { ignoreJSONParse(value: string) {
let index = autoIdMap.get(prefix); const result = _.attempt(() => JSON.parse(value));
if(index > 999999) index = 0; //超过最大数字则重置为0 if (_.isError(result)) return null;
autoIdMap.set(prefix, (index || 0) + 1); return result;
return `${prefix}${index || 1}`; },
},
ignoreJSONParse(value: string) { generateRandomString(options: any): string {
const result = _.attempt(() => JSON.parse(value)); return randomstring.generate(options);
if(_.isError(result)) },
return null;
return result;
},
generateRandomString(options: any): string { getResponseContentType(value: any): string | null {
return randomstring.generate(options); return value.headers
}, ? value.headers["content-type"] || value.headers["Content-Type"]
: null;
},
getResponseContentType(value: any): string | null { mimeToExtension(value: string) {
return value.headers ? (value.headers["content-type"] || value.headers["Content-Type"]) : null; let extension = mime.getExtension(value);
}, if (extension == "mpga") return "mp3";
return extension;
},
mimeToExtension(value: string) { extractURLExtension(value: string) {
let extension = mime.getExtension(value); const extname = path.extname(new URL(value).pathname);
if(extension == "mpga") return extname.substring(1).toLowerCase();
return "mp3"; },
return extension;
},
extractURLExtension(value: string) { createCronJob(cronPatterns: any, callback?: Function) {
const extname = path.extname(new URL(value).pathname); if (!_.isFunction(callback))
return extname.substring(1).toLowerCase(); throw new Error("callback must be an Function");
}, return new CronJob(
cronPatterns,
() => callback(),
null,
false,
"Asia/Shanghai"
);
},
createCronJob(cronPatterns: any, callback?: Function) { getDateString(format = "yyyy-MM-dd", date = new Date()) {
if(!_.isFunction(callback)) throw new Error("callback must be an Function"); return dateFormat(date, format);
return new CronJob(cronPatterns, () => callback(), null, false, "Asia/Shanghai"); },
},
getDateString(format = "yyyy-MM-dd", date = new Date()) { getIPAddressesByIPv4(): string[] {
return dateFormat(date, format); const interfaces = os.networkInterfaces();
}, const addresses = [];
for (let name in interfaces) {
const networks = interfaces[name];
const results = networks.filter(
(network) =>
network.family === "IPv4" &&
network.address !== "127.0.0.1" &&
!network.internal
);
if (results[0] && results[0].address) addresses.push(results[0].address);
}
return addresses;
},
getIPAddressesByIPv4(): string[] { getMACAddressesByIPv4(): string[] {
const interfaces = os.networkInterfaces(); const interfaces = os.networkInterfaces();
const addresses = []; const addresses = [];
for (let name in interfaces) { for (let name in interfaces) {
const networks = interfaces[name]; const networks = interfaces[name];
const results = networks.filter(network => network.family === "IPv4" && network.address !== "127.0.0.1" && !network.internal); const results = networks.filter(
if (results[0] && results[0].address) (network) =>
addresses.push(results[0].address); network.family === "IPv4" &&
} network.address !== "127.0.0.1" &&
return addresses; !network.internal
}, );
if (results[0] && results[0].mac) addresses.push(results[0].mac);
}
return addresses;
},
getMACAddressesByIPv4(): string[] { generateSSEData(event?: string, data?: string, retry?: number) {
const interfaces = os.networkInterfaces(); return `event: ${event || "message"}\ndata: ${(data || "")
const addresses = []; .replace(/\n/g, "\\n")
for (let name in interfaces) { .replace(/\s/g, "\\s")}\nretry: ${retry || 3000}\n\n`;
const networks = interfaces[name]; },
const results = networks.filter(network => network.family === "IPv4" && network.address !== "127.0.0.1" && !network.internal);
if (results[0] && results[0].mac)
addresses.push(results[0].mac);
}
return addresses;
},
generateSSEData(event?: string, data?: string, retry?: number) { buildDataBASE64(type, ext, buffer) {
return `event: ${event || "message"}\ndata: ${(data || "").replace(/\n/g, "\\n").replace(/\s/g, "\\s")}\nretry: ${retry || 3000}\n\n`; return `data:${type}/${ext.replace("jpg", "jpeg")};base64,${buffer.toString(
}, "base64"
)}`;
},
buildDataBASE64(type, ext, buffer) { isLinux() {
return `data:${type}/${ext.replace("jpg", "jpeg")};base64,${buffer.toString("base64")}`; return os.platform() !== "win32";
}, },
isLinux() { isIPAddress(value) {
return os.platform() !== "win32"; return (
}, _.isString(value) &&
(/^((2[0-4]\d|25[0-5]|[01]?\d\d?)\.){3}(2[0-4]\d|25[0-5]|[01]?\d\d?)$/.test(
isIPAddress(value) { value
return _.isString(value) && (/^((2[0-4]\d|25[0-5]|[01]?\d\d?)\.){3}(2[0-4]\d|25[0-5]|[01]?\d\d?)$/.test(value) || /\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*/.test(value)); ) ||
}, /\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*/.test(
value
))
);
},
isPort(value) { isPort(value) {
return _.isNumber(value) && value > 0 && value < 65536; return _.isNumber(value) && value > 0 && value < 65536;
}, },
isReadStream(value): boolean { isReadStream(value): boolean {
return value && (value instanceof Readable || "readable" in value || value.readable); return (
}, value &&
(value instanceof Readable || "readable" in value || value.readable)
);
},
isWriteStream(value): boolean { isWriteStream(value): boolean {
return value && (value instanceof Writable || "writable" in value || value.writable); return (
}, value &&
(value instanceof Writable || "writable" in value || value.writable)
);
},
isHttpStatusCode(value) { isHttpStatusCode(value) {
return _.isNumber(value) && Object.values(HTTP_STATUS_CODE).includes(value); return _.isNumber(value) && Object.values(HTTP_STATUS_CODE).includes(value);
}, },
isURL(value) { isURL(value) {
return !_.isUndefined(value) && /^(http|https)/.test(value); return !_.isUndefined(value) && /^(http|https)/.test(value);
}, },
isSrc(value) { isSrc(value) {
return !_.isUndefined(value) && /^\/.+\.[0-9a-zA-Z]+(\?.+)?$/.test(value); return !_.isUndefined(value) && /^\/.+\.[0-9a-zA-Z]+(\?.+)?$/.test(value);
}, },
isBASE64(value) { isBASE64(value) {
return !_.isUndefined(value) && /^[a-zA-Z0-9\/\+]+(=?)+$/.test(value); return !_.isUndefined(value) && /^[a-zA-Z0-9\/\+]+(=?)+$/.test(value);
}, },
isBASE64Data(value) { isBASE64Data(value) {
return /^data:/.test(value); return /^data:/.test(value);
}, },
extractBASE64DataFormat(value): string | null { extractBASE64DataFormat(value): string | null {
const match = value.trim().match(/^data:(.+);base64,/); const match = value.trim().match(/^data:(.+);base64,/);
if(!match) return null; if (!match) return null;
return match[1]; return match[1];
}, },
removeBASE64DataHeader(value): string { removeBASE64DataHeader(value): string {
return value.replace(/^data:(.+);base64,/, ""); return value.replace(/^data:(.+);base64,/, "");
}, },
isDataString(value): boolean { isDataString(value): boolean {
return /^(base64|json):/.test(value); return /^(base64|json):/.test(value);
}, },
isStringNumber(value) { isStringNumber(value) {
return _.isFinite(Number(value)); return _.isFinite(Number(value));
}, },
isUnixTimestamp(value) { isUnixTimestamp(value) {
return /^[0-9]{10}$/.test(`${value}`); return /^[0-9]{10}$/.test(`${value}`);
}, },
isTimestamp(value) { isTimestamp(value) {
return /^[0-9]{13}$/.test(`${value}`); return /^[0-9]{13}$/.test(`${value}`);
}, },
isEmail(value) { isEmail(value) {
return /^([a-zA-Z0-9]+[_|\_|\.]?)*[a-zA-Z0-9]+@([a-zA-Z0-9]+[_|\_|\.]?)*[a-zA-Z0-9]+\.[a-zA-Z]{2,3}$/.test(value); return /^([a-zA-Z0-9]+[_|\_|\.]?)*[a-zA-Z0-9]+@([a-zA-Z0-9]+[_|\_|\.]?)*[a-zA-Z0-9]+\.[a-zA-Z]{2,3}$/.test(
}, value
);
},
isAsyncFunction(value) { isAsyncFunction(value) {
return Object.prototype.toString.call(value) === "[object AsyncFunction]"; return Object.prototype.toString.call(value) === "[object AsyncFunction]";
}, },
async isAPNG(filePath) { async isAPNG(filePath) {
let head; let head;
const readStream = fs.createReadStream(filePath, { start: 37, end: 40 }); const readStream = fs.createReadStream(filePath, { start: 37, end: 40 });
const readPromise = new Promise((resolve, reject) => { const readPromise = new Promise((resolve, reject) => {
readStream.once("end", resolve); readStream.once("end", resolve);
readStream.once("error", reject); readStream.once("error", reject);
}); });
readStream.once("data", data => head = data); readStream.once("data", (data) => (head = data));
await readPromise; await readPromise;
return head.compare(Buffer.from([0x61, 0x63, 0x54, 0x4c])) === 0; return head.compare(Buffer.from([0x61, 0x63, 0x54, 0x4c])) === 0;
}, },
unixTimestamp() { unixTimestamp() {
return parseInt(`${Date.now() / 1000}`); return parseInt(`${Date.now() / 1000}`);
}, },
timestamp() { timestamp() {
return Date.now(); return Date.now();
}, },
urlJoin(...values) { urlJoin(...values) {
let url = ""; let url = "";
for (let i = 0; i < values.length; i++) for (let i = 0; i < values.length; i++)
url += `${i > 0 ? "/" : ""}${values[i].replace(/^\/*/, "").replace(/\/*$/, "")}`; url += `${i > 0 ? "/" : ""}${values[i]
return url; .replace(/^\/*/, "")
}, .replace(/\/*$/, "")}`;
return url;
},
millisecondsToHmss(milliseconds) {
  // Format a duration in milliseconds as "HH:MM:SS.mmm".
  // Strings are assumed to be pre-formatted and are passed through untouched.
  if (_.isString(milliseconds)) return milliseconds;
  milliseconds = parseInt(milliseconds);
  const sec = Math.floor(milliseconds / 1000);
  const hours = Math.floor(sec / 3600);
  const minutes = Math.floor((sec - hours * 3600) / 60);
  const seconds = sec - hours * 3600 - minutes * 60;
  // Equivalent to the old "(ms % 60000) - seconds * 1000" but direct.
  const ms = milliseconds % 1000;
  const pad = (value: number, length = 2) => `${value}`.padStart(length, "0");
  // Zero-pad the millisecond part: the previous version rendered 5ms as ".5",
  // which is indistinguishable from 500ms.
  return `${pad(hours)}:${pad(minutes)}:${pad(seconds)}.${pad(ms, 3)}`;
},
millisecondsToTimeString(milliseconds) {
  // Human-readable duration: "123ms", "1.5s", or "2m30s".
  if (milliseconds >= 60000) {
    const wholeMinutes = Math.floor(milliseconds / 1000 / 60);
    const remainderSeconds = Math.floor((milliseconds / 1000) % 60);
    return `${wholeMinutes}m${remainderSeconds}s`;
  }
  if (milliseconds >= 1000)
    return `${parseFloat((milliseconds / 1000).toFixed(2))}s`;
  return `${milliseconds}ms`;
},
rgbToHex(r, g, b): string {
  // Pack the three 8-bit channels into one integer; the (1 << 24) bias
  // guarantees leading zeros survive toString(16), then slice(1) drops it.
  const packed = (1 << 24) + (r << 16) + (g << 8) + b;
  return packed.toString(16).slice(1);
},
hexToRgb(hex) {
  // Parse "#RRGGBB" — or shorthand "#RGB" (e.g. "#f80" === "#ff8800") —
  // into an [r, g, b] tuple of 0-255 integers. Six-digit input behaves
  // exactly as before; the shorthand form is a backward-compatible addition.
  let digits = hex.replace(/^#/, "");
  if (digits.length === 3) digits = digits.replace(/./g, (c) => c + c);
  const value = parseInt(digits, 16);
  return [(value >> 16) & 255, (value >> 8) & 255, value & 255];
},
md5(value) {
  // Hex-encoded MD5 digest of the given string/Buffer.
  const hash = crypto.createHash("md5");
  hash.update(value);
  return hash.digest("hex");
},
crc32(value) {
  // CRC32 checksum; Buffers use the byte-oriented variant, anything else
  // is checksummed as a string.
  if (_.isBuffer(value)) return CRC32.buf(value);
  return CRC32.str(value);
},
arrayParse(value): any[] {
  // Normalize a value to an array, wrapping non-array inputs.
  if (_.isArray(value)) return value;
  return [value];
},
booleanParse(value) {
  // Coerce the string "true" or the boolean true to true; everything else
  // is false. The comparison already yields a boolean, so the redundant
  // `? true : false` ternary of the previous version is dropped.
  return value === "true" || value === true;
},
encodeBASE64(value) {
  // Base64-encode a string or Buffer.
  const buffer = Buffer.from(value);
  return buffer.toString("base64");
},
decodeBASE64(value) {
  // Decode a base64 string back to its default (UTF-8) text form.
  const buffer = Buffer.from(value, "base64");
  return buffer.toString();
},
async fetchFileBASE64(url: string) {
  // Download a remote file and return its contents base64-encoded.
  // arraybuffer responseType makes axios hand back a Buffer under Node.
  const response = await axios.get(url, {
    responseType: "arraybuffer",
  });
  return response.data.toString("base64");
},
}; };
export default util; export default util;

27
vercel.json Normal file
View File

@ -0,0 +1,27 @@
{
"builds": [
{
"src": "./dist/*.html",
"use": "@vercel/static"
},
{
"src": "./dist/index.js",
"use": "@vercel/node"
}
],
"routes": [
{
"src": "/",
"dest": "/dist/welcome.html"
},
{
"src": "/(.*)",
"dest": "/dist",
"headers": {
"Access-Control-Allow-Credentials": "true",
"Access-Control-Allow-Methods": "GET,OPTIONS,PATCH,DELETE,POST,PUT",
"Access-Control-Allow-Headers": "X-CSRF-Token, X-Requested-With, Accept, Accept-Version, Content-Length, Content-MD5, Content-Type, Date, X-Api-Version, Content-Type, Authorization"
}
}
]
}

1918
yarn.lock Normal file

File diff suppressed because it is too large Load Diff