Merge branch 'main' into fix-incorrect-recently-pushed-new-branches-check

This commit is contained in:
yp05327 2023-12-07 11:08:24 +09:00 committed by GitHub
commit 6f5f65ee79
530 changed files with 2454 additions and 1781 deletions

View File

@ -128,7 +128,7 @@ rules:
"@stylistic/js/computed-property-spacing": [2, never]
"@stylistic/js/dot-location": [2, property]
"@stylistic/js/eol-last": [2]
"@stylistic/js/func-call-spacing": [2, never]
"@stylistic/js/function-call-spacing": [2, never]
"@stylistic/js/function-call-argument-newline": [0]
"@stylistic/js/function-paren-newline": [0]
"@stylistic/js/generator-star-spacing": [0]

View File

@ -8,21 +8,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
# FIXME: https://github.com/jlumbroso/free-disk-space/issues/17
- name: same as 'large-packages' but without 'google-cloud-sdk'
shell: bash
run: |
sudo apt-get update
sudo apt-get remove -y '^dotnet-.*' || true
sudo apt-get remove -y '^llvm-.*' || true
sudo apt-get remove -y 'php.*' || true
sudo apt-get remove -y '^mongodb-.*' || true
sudo apt-get remove -y '^mysql-.*' || true
sudo apt-get remove -y azure-cli google-chrome-stable firefox powershell mono-devel libgl1-mesa-dri || true
sudo apt-get autoremove -y
sudo apt-get clean
env:
DEBIAN_FRONTEND: noninteractive
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:

View File

@ -44,7 +44,7 @@ jobs:
- name: Get cleaned branch name
id: clean_name
run: |
REF_NAME=$(echo "${{ github.ref }}" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\///' -e 's/release\/v//')
REF_NAME=$(echo "${{ github.ref }}" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\/v//' -e 's/release\/v//')
echo "Cleaned name is ${REF_NAME}"
echo "branch=${REF_NAME}" >> "$GITHUB_OUTPUT"
- name: configure aws
@ -56,6 +56,10 @@ jobs:
- name: upload binaries to s3
run: |
aws s3 sync dist/release s3://${{ secrets.AWS_S3_BUCKET }}/gitea/${{ steps.clean_name.outputs.branch }} --no-progress
- name: Install GH CLI
uses: dev-hanz-ops/install-gh-cli-action@v0.1.0
with:
gh-cli-version: 2.39.1
- name: create github release
run: |
gh release create ${{ github.ref_name }} --title ${{ github.ref_name }} --draft --notes-from-tag dist/release/*

View File

@ -46,7 +46,7 @@ jobs:
- name: Get cleaned branch name
id: clean_name
run: |
REF_NAME=$(echo "${{ github.ref }}" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\///' -e 's/release\/v//')
REF_NAME=$(echo "${{ github.ref }}" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\/v//' -e 's/release\/v//')
echo "Cleaned name is ${REF_NAME}"
echo "branch=${REF_NAME}" >> "$GITHUB_OUTPUT"
- name: configure aws
@ -58,9 +58,13 @@ jobs:
- name: upload binaries to s3
run: |
aws s3 sync dist/release s3://${{ secrets.AWS_S3_BUCKET }}/gitea/${{ steps.clean_name.outputs.branch }} --no-progress
- name: Install GH CLI
uses: dev-hanz-ops/install-gh-cli-action@v0.1.0
with:
gh-cli-version: 2.39.1
- name: create github release
run: |
gh release create ${{ github.ref_name }} --title ${{ github.ref_name }} --draft --notes-from-tag dist/release/*
gh release create ${{ github.ref_name }} --title ${{ github.ref_name }} --notes-from-tag dist/release/*
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}
docker-rootful:

View File

@ -203,10 +203,20 @@ Some of the key points:
In the PR title, describe the problem you are fixing, not how you are fixing it. \
Use the first comment as a summary of your PR. \
In the PR summary, you can describe exactly how you are fixing this problem. \
In the PR summary, you can describe exactly how you are fixing this problem.
Keep this summary up-to-date as the PR evolves. \
If your PR changes the UI, you must add **after** screenshots in the PR summary. \
If you are not implementing a new feature, you should also post **before** screenshots for comparison. \
If you are not implementing a new feature, you should also post **before** screenshots for comparison.
If you are implementing a new feature, your PR will only be merged if your screenshots are up to date.\
Furthermore, feature PRs will only be merged if their summary contains a clear usage description (understandable for users) and testing description (understandable for reviewers).
You should strive to combine both into a single description.
Another requirement for merging PRs is that the PR is labeled correctly.\
However, this is not your job as a contributor, but the job of the person merging your PR.\
If you think that your PR was labeled incorrectly, or notice that it was merged without labels, please let us know.
If your PR closes some issues, you must note that in a way that both GitHub and Gitea understand, i.e. by appending a paragraph like
```text
@ -255,13 +265,16 @@ Changing the default value of a setting or replacing the setting with another on
#### How to handle breaking PRs?
If your PR has a breaking change, you must add a `BREAKING` section to your PR summary, e.g.
If your PR has a breaking change, you must add two things to the summary of your PR:
```
1. A reasoning why this breaking change is necessary
2. A `BREAKING` section explaining in simple terms (understandable for a typical user) how this PR affects users and how to mitigate these changes. This section can look for example like
```md
## :warning: BREAKING :warning:
```
To explain how this will affect users and how to mitigate these changes.
Breaking PRs will not be merged until both of these requirements are met.
### Maintaining open PRs

View File

@ -10,7 +10,7 @@
<img src="https://github.com/go-gitea/gitea/actions/workflows/release-nightly.yml/badge.svg?branch=main">
</a>
<a href="https://discord.gg/Gitea" title="Join the Discord chat at https://discord.gg/Gitea">
<img src="https://img.shields.io/discord/322538954119184384.svg">
<img src="https://img.shields.io/discord/322538954119184384.svg?logo=discord&logoColor=white&label=Discord&color=5865F2">
</a>
<a href="https://app.codecov.io/gh/go-gitea/gitea" title="Codecov">
<img src="https://codecov.io/gh/go-gitea/gitea/branch/main/graph/badge.svg">

View File

@ -9,6 +9,7 @@ import (
"text/tabwriter"
auth_model "code.gitea.io/gitea/models/auth"
"code.gitea.io/gitea/models/db"
auth_service "code.gitea.io/gitea/services/auth"
"github.com/urfave/cli/v2"
@ -62,7 +63,7 @@ func runListAuth(c *cli.Context) error {
return err
}
authSources, err := auth_model.FindSources(ctx, auth_model.FindSourcesOptions{})
authSources, err := db.Find[auth_model.Source](ctx, auth_model.FindSourcesOptions{})
if err != nil {
return err
}

View File

@ -1017,7 +1017,7 @@ LEVEL = Info
;ALLOWED_TYPES =
;;
;; Max size of each file in megabytes. Defaults to 50MB
;FILE_MAX_SIZE = 50
;FILE_MAX_SIZE = 50
;;
;; Max number of files per upload. Defaults to 5
;MAX_FILES = 5
@ -2583,6 +2583,8 @@ LEVEL = Info
;ENDLESS_TASK_TIMEOUT = 3h
;; Timeout to cancel the jobs which have waiting status, but haven't been picked by a runner for a long time
;ABANDONED_JOB_TIMEOUT = 24h
;; Strings committers can place inside a commit message to skip executing the corresponding actions workflow
;SKIP_WORKFLOW_STRINGS = [skip ci],[ci skip],[no ci],[skip actions],[actions skip]
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

View File

@ -343,7 +343,7 @@ The following configuration set `Content-Type: application/vnd.android.package-a
- `SSH_AUTHORIZED_PRINCIPALS_ALLOW`: **off** or **username, email**: \[off, username, email, anything\]: Specify the principals values that users are allowed to use as principal. When set to `anything` no checks are done on the principal string. When set to `off` authorized principal are not allowed to be set.
- `SSH_CREATE_AUTHORIZED_PRINCIPALS_FILE`: **false/true**: Gitea will create an authorized_principals file by default when it is not using the internal ssh server and `SSH_AUTHORIZED_PRINCIPALS_ALLOW` is not `off`.
- `SSH_AUTHORIZED_PRINCIPALS_BACKUP`: **false/true**: Enable SSH Authorized Principals Backup when rewriting all keys, default is true if `SSH_AUTHORIZED_PRINCIPALS_ALLOW` is not `off`.
- `SSH_AUTHORIZED_KEYS_COMMAND_TEMPLATE`: **{{.AppPath}} --config={{.CustomConf}} serv key-{{.Key.ID}}**: Set the template for the command to be passed on authorized keys. Possible keys are: AppPath, AppWorkPath, CustomConf, CustomPath, Key - where Key is a `models/asymkey.PublicKey` and the others are strings which are shell-quoted.
- `SSH_AUTHORIZED_KEYS_COMMAND_TEMPLATE`: **`{{.AppPath}} --config={{.CustomConf}} serv key-{{.Key.ID}}`**: Set the template for the command to be passed on authorized keys. Possible keys are: AppPath, AppWorkPath, CustomConf, CustomPath, Key - where Key is a `models/asymkey.PublicKey` and the others are strings which are shell-quoted.
- `SSH_SERVER_CIPHERS`: **chacha20-poly1305@openssh.com, aes128-ctr, aes192-ctr, aes256-ctr, aes128-gcm@openssh.com, aes256-gcm@openssh.com**: For the built-in SSH server, choose the ciphers to support for SSH connections, for system SSH this setting has no effect.
- `SSH_SERVER_KEY_EXCHANGES`: **curve25519-sha256, ecdh-sha2-nistp256, ecdh-sha2-nistp384, ecdh-sha2-nistp521, diffie-hellman-group14-sha256, diffie-hellman-group14-sha1**: For the built-in SSH server, choose the key exchange algorithms to support for SSH connections, for system SSH this setting has no effect.
- `SSH_SERVER_MACS`: **hmac-sha2-256-etm@openssh.com, hmac-sha2-256, hmac-sha1**: For the built-in SSH server, choose the MACs to support for SSH connections, for system SSH this setting has no effect
@ -1396,6 +1396,7 @@ PROXY_HOSTS = *.github.com
- `ZOMBIE_TASK_TIMEOUT`: **10m**: Timeout to stop the task which have running status, but haven't been updated for a long time
- `ENDLESS_TASK_TIMEOUT`: **3h**: Timeout to stop the tasks which have running status and continuous updates, but don't end for a long time
- `ABANDONED_JOB_TIMEOUT`: **24h**: Timeout to cancel the jobs which have waiting status, but haven't been picked by a runner for a long time
- `SKIP_WORKFLOW_STRINGS`: **[skip ci],[ci skip],[no ci],[skip actions],[actions skip]**: Strings committers can place inside a commit message to skip executing the corresponding actions workflow
`DEFAULT_ACTIONS_URL` indicates where the Gitea Actions runners should find the actions with relative path.
For example, `uses: actions/checkout@v3` means `https://github.com/actions/checkout@v3` since the value of `DEFAULT_ACTIONS_URL` is `github`.
@ -1405,7 +1406,7 @@ Please note that using `self` is not recommended for most cases, as it could mak
Additionally, it requires you to mirror all the actions you need to your Gitea instance, which may not be worth it.
Therefore, please use `self` only if you understand what you are doing.
In earlier versions (<= 1.19), `DEFAULT_ACTIONS_URL` could be set to any custom URLs like `https://gitea.com` or `http://your-git-server,https://gitea.com`, and the default value was `https://gitea.com`.
In earlier versions (`<= 1.19`), `DEFAULT_ACTIONS_URL` could be set to any custom URLs like `https://gitea.com` or `http://your-git-server,https://gitea.com`, and the default value was `https://gitea.com`.
However, later updates removed those options, and now the only options are `github` and `self`, with the default value being `github`.
However, if you want to use actions from another git server, you can use a complete URL in the `uses` field; this is supported by Gitea (but not GitHub).
Like `uses: https://gitea.com/actions/checkout@v3` or `uses: http://your-git-server/actions/checkout@v3`.

View File

@ -335,7 +335,7 @@ menu:
- `SSH_AUTHORIZED_PRINCIPALS_ALLOW`: **off** or **username, email**: \[off, username, email, anything\]: Specify the principal values that users are allowed to use as principal. When set to `anything`, no checks are done on the principal string. When set to `off`, authorized principals are not allowed to be set.
- `SSH_CREATE_AUTHORIZED_PRINCIPALS_FILE`: **false/true**: When Gitea is not using the built-in SSH server and `SSH_AUTHORIZED_PRINCIPALS_ALLOW` is not `off`, Gitea will create an authorized_principals file by default.
- `SSH_AUTHORIZED_PRINCIPALS_BACKUP`: **false/true**: Enable SSH authorized principals backup when rewriting all keys; the default is true if `SSH_AUTHORIZED_PRINCIPALS_ALLOW` is not `off`.
- `SSH_AUTHORIZED_KEYS_COMMAND_TEMPLATE`: **{{.AppPath}} --config={{.CustomConf}} serv key-{{.Key.ID}}**: Set the template for the command to be passed on authorized keys. Possible keys are: AppPath, AppWorkPath, CustomConf, CustomPath, Key - where Key is a `models/asymkey.PublicKey` and the others are shell-quoted strings.
- `SSH_AUTHORIZED_KEYS_COMMAND_TEMPLATE`: **`{{.AppPath}} --config={{.CustomConf}} serv key-{{.Key.ID}}`**: Set the template for the command to be passed on authorized keys. Possible keys are: AppPath, AppWorkPath, CustomConf, CustomPath, Key - where Key is a `models/asymkey.PublicKey` and the others are shell-quoted strings.
- `SSH_SERVER_CIPHERS`: **chacha20-poly1305@openssh.com, aes128-ctr, aes192-ctr, aes256-ctr, aes128-gcm@openssh.com, aes256-gcm@openssh.com**: For the built-in SSH server, choose the ciphers to support for SSH connections; for system SSH this setting has no effect.
- `SSH_SERVER_KEY_EXCHANGES`: **curve25519-sha256, ecdh-sha2-nistp256, ecdh-sha2-nistp384, ecdh-sha2-nistp521, diffie-hellman-group14-sha256, diffie-hellman-group14-sha1**: For the built-in SSH server, choose the key exchange algorithms to support for SSH connections; for system SSH this setting has no effect.
- `SSH_SERVER_MACS`: **hmac-sha2-256-etm@openssh.com, hmac-sha2-256, hmac-sha1**: For the built-in SSH server, choose the MAC algorithms to support for SSH connections; for system SSH this setting has no effect.
@ -1343,7 +1343,7 @@ PROXY_HOSTS = *.github.com
Additionally, it requires you to mirror all the actions you need to your Gitea instance, which may not be worth it.
Therefore, please use `self` only if you understand what you are doing.
In earlier versions (<= 1.19), `DEFAULT_ACTIONS_URL` could be set to any custom URL, such as `https://gitea.com` or `http://your-git-server,https://gitea.com`, and the default value was `https://gitea.com`.
In earlier versions (`<= 1.19`), `DEFAULT_ACTIONS_URL` could be set to any custom URL, such as `https://gitea.com` or `http://your-git-server,https://gitea.com`, and the default value was `https://gitea.com`.
However, later updates removed those options, and now the only options are `github` and `self`, with the default value being `github`.
However, if you want to use actions from another git server, you can use a complete URL in the `uses` field; this is supported by Gitea (but not GitHub).
For example `uses: https://gitea.com/actions/checkout@v3` or `uses: http://your-git-server/actions/checkout@v3`.

View File

@ -61,7 +61,7 @@ Please note: authentication is only supported when the SMTP server communication
- STARTTLS (also known as Opportunistic TLS) via port 587. The initial connection is done over cleartext, but can then be upgraded to TLS if the server supports it.
- SMTPS connection (SMTP over TLS) via the default port 465. Connection to the server use TLS from the beginning.
- Forced SMTPS connection with `IS_TLS_ENABLED=true`. (These are both known as Implicit TLS.)
- Forced SMTPS connection with `PROTOCOL=smtps`. (These are both known as Implicit TLS.)
This is due to protections imposed by the Go internal libraries against STRIPTLS attacks.
Note that Implicit TLS is recommended by [RFC8314](https://tools.ietf.org/html/rfc8314#section-3) since 2018.

View File

@ -55,13 +55,13 @@ PASSWD = `password`
To send a test email to validate the settings, go to Gitea > Site Administration > Configuration > SMTP Mailer Configuration.
For the full list of options, check the [Config Cheat Sheet](doc/administration/config-cheat-sheet.zh-cn.md).
For the full list of options, check the [Config Cheat Sheet](administration/config-cheat-sheet.md).
Please note: authentication is only supported when the SMTP server communication is encrypted with TLS or `HOST=localhost`. TLS encryption can be done via:
- STARTTLS (also known as Opportunistic TLS) via port 587. The initial connection is made in cleartext, but can then be upgraded to TLS if the server supports it.
- SMTPS connection via the default port 465. The connection to the server uses TLS from the beginning.
- Forced SMTPS connection with `IS_TLS_ENABLED=true`. (Both of these are known as Implicit TLS.)
- Forced SMTPS connection with `PROTOCOL=smtps`. (Both of these are known as Implicit TLS.)
This is due to the protection imposed by the Go internal libraries against STRIPTLS attacks.
Note that Implicit TLS has been recommended by [RFC8314](https://tools.ietf.org/html/rfc8314#section-3) since 2018.

View File

@ -33,7 +33,7 @@ CERT_FILE = cert.pem
KEY_FILE = key.pem
```
Please note that if your certificate is signed by a third-party certificate authority (i.e. not self-signed), cert.pem should contain the certificate chain. The server certificate must be the first entry in cert.pem, followed by the intermediates (if any). The root certificate does not need to be included, because the connecting client must already have it in order to establish the trust relationship. To learn more about the config values, please check the [Config Cheat Sheet](../config-cheat-sheet#server-server).
Please note that if your certificate is signed by a third-party certificate authority (i.e. not self-signed), cert.pem should contain the certificate chain. The server certificate must be the first entry in cert.pem, followed by the intermediates (if any). The root certificate does not need to be included, because the connecting client must already have it in order to establish the trust relationship. To learn more about the config values, please check the [Config Cheat Sheet](administration/config-cheat-sheet#server-server).
For the "CERT_FILE" and "KEY_FILE" fields, when the file path is a relative path, it is relative to the "GITEA_CUSTOM" environment variable. It can also be an absolute path.

View File

@ -19,10 +19,7 @@ menu:
## Enabling/configuring API access
By default, `ENABLE_SWAGGER` is true, and
`MAX_RESPONSE_ITEMS` is set to 50. See [Config Cheat
Sheet](administration/config-cheat-sheet.md) for more
information.
By default, `ENABLE_SWAGGER` is true, and `MAX_RESPONSE_ITEMS` is set to 50. See [Config Cheat Sheet](administration/config-cheat-sheet.md) for more information.
## Authentication

View File

@ -19,8 +19,7 @@ menu:
## Enabling/configuring API access
By default, `ENABLE_SWAGGER` is true, and `MAX_RESPONSE_ITEMS` is set to 50. See [Config Cheat
Sheet](administration/config-cheat-sheet.md) for more information.
By default, `ENABLE_SWAGGER` is true, and `MAX_RESPONSE_ITEMS` is set to 50. See [Config Cheat Sheet](administration/config-cheat-sheet.md) for more information.
## Authentication via the API

View File

@ -39,7 +39,6 @@ If a bug fix is targeted on 1.20.1 but 1.20.1 is not released yet, you can get t
To migrate from Gogs to Gitea:
- [Gogs version 0.9.146 or less](installation/upgrade-from-gogs.md)
- [Gogs version 0.11.46.0418](https://github.com/go-gitea/gitea/issues/4286)
To migrate from GitHub to Gitea, you can use Gitea's built-in migration form.
@ -138,9 +137,9 @@ All Gitea instances have the built-in API and there is no way to disable it comp
You can, however, disable showing its documentation by setting `ENABLE_SWAGGER` to `false` in the `api` section of your `app.ini`.
For more information, refer to Gitea's [API docs](development/api-usage.md).
You can see the latest API (for example) on <https://try.gitea.io/api/swagger>.
You can see the latest API (for example) on https://try.gitea.io/api/swagger
You can also see an example of the `swagger.json` file at <https://try.gitea.io/swagger.v1.json>.
You can also see an example of the `swagger.json` file at https://try.gitea.io/swagger.v1.json
## Adjusting your server for public/private use

View File

@ -41,7 +41,6 @@ menu:
To migrate from Gogs to Gitea:
- [Gogs version 0.9.146 or less](installation/upgrade-from-gogs.md)
- [Gogs version 0.11.46.0418](https://github.com/go-gitea/gitea/issues/4286)
To migrate from GitHub to Gitea, you can use Gitea's built-in migration form.
@ -142,9 +141,9 @@ Gitea does not provide a built-in Pages server. You need a dedicated domain to serve
However, you can disable showing its documentation by setting `ENABLE_SWAGGER` to `false` in the `api` section of your app.ini.
For more information, refer to Gitea's [API docs](development/api-usage.md).
You can see the latest API (for example) at <https://try.gitea.io/api/swagger>
You can see the latest API (for example) at https://try.gitea.io/api/swagger
You can also see an example of the `swagger.json` file at <https://try.gitea.io/swagger.v1.json>
You can also see an example of the `swagger.json` file at https://try.gitea.io/swagger.v1.json
## Adjusting your server for public/private use

View File

@ -117,7 +117,7 @@ chmod 770 /etc/gitea
- Use `gitea generate secret` to create `SECRET_KEY` and `INTERNAL_TOKEN`
- Provide all the necessary secrets
For details on `gitea generate secret`, refer to the [command line documentation](/zh-cn/command-line/).
For details on `gitea generate secret`, refer to the [command line documentation](administration/command-line.md).
### Configuring the Gitea working path
@ -209,6 +209,6 @@ remote: ./hooks/pre-receive.d/gitea: line 2: [...]: No such file or directory
If you are not using Gitea's built-in SSH server, you also need to rewrite the authorized keys file by running the task `Update the '.ssh/authorized_keys' file with Gitea SSH keys.` in the admin options.
> For more lessons learned, see the English [Troubleshooting](/en-us/install-from-binary/#troubleshooting) page
> For more lessons learned, see the English [Troubleshooting](https://docs.gitea.com/installation/install-from-binary#troubleshooting) page
If you cannot find what you need on this page, please visit the [help page](help/support.md)

View File

@ -64,7 +64,7 @@ git checkout v@version@ # or git checkout pr-xyz
- `go` @minGoVersion@ or higher, see [here](https://golang.org/dl/)
- `node` @minNodeVersion@ or higher with `npm` installed, see [here](https://nodejs.org/zh-cn/download/)
- `make`, see [here](/zh-cn/hacking-on-gitea/)
- `make`, see [here](development/hacking-on-gitea.md)
To keep the build process as simple as possible, various [make tasks](https://github.com/go-gitea/gitea/blob/main/Makefile) are provided.

View File

@ -114,7 +114,7 @@ If you cannot see the settings page, please make sure that you have the right pe
The format of the registration token is a random string `D0gvfu2iHfUjNqCYVljVyRV14fISpJxxxxxxxxxx`.
A registration token can also be obtained from the gitea [command-line interface](../../administration/command-line.en-us.md#actions-generate-runner-token):
A registration token can also be obtained from the gitea [command-line interface](administration/command-line.md#actions-generate-runner-token):
```
gitea --config /etc/gitea/app.ini actions generate-runner-token

View File

@ -113,7 +113,7 @@ The runner level determines where to obtain the registration token.
The format of the registration token is a random string `D0gvfu2iHfUjNqCYVljVyRV14fISpJxxxxxxxxxx`.
A registration token can also be obtained from the Gitea [command-line interface](../../administration/command-line.en-us.md#actions-generate-runner-token):
A registration token can also be obtained from the Gitea [command-line interface](administration/command-line.md#actions-generate-runner-token):
### Registering the runner

View File

@ -116,8 +116,8 @@ Pre and Post steps don't have their own section in the job log user interface.
Previously (Pre 1.21.0), `[actions].DEFAULT_ACTIONS_URL` defaulted to `https://gitea.com`.
We have since restricted this option to only allow two values (`github` and `self`).
When set to `github`, the new default, Gitea will download non-fully-qualified actions from <https://github.com>.
For example, if you use `uses: actions/checkout@v3`, it will download the checkout repository from <https://github.com/actions/checkout.git>.
When set to `github`, the new default, Gitea will download non-fully-qualified actions from `https://github.com`.
For example, if you use `uses: actions/checkout@v3`, it will download the checkout repository from `https://github.com/actions/checkout.git`.
If you want to download an action from another git hoster, you can use an absolute URL, e.g. `uses: https://gitea.com/actions/checkout@v3`.

View File

@ -23,7 +23,7 @@ First of all, you need a Gitea instance.
You can follow the [documentation](installation/from-package.md) to set up a new instance or upgrade your existing one.
It doesn't matter how you install or run Gitea, as long as its version is 1.19.0 or higher.
Actions are disabled by default, so you need to add the following to the configuration file to enable it:
Since 1.21.0, Actions are enabled by default. If you are using versions before 1.21.0, you need to add the following to the configuration file to enable it:
```ini
[actions]

View File

@ -23,7 +23,7 @@ menu:
You can follow the [documentation](installation/from-package.md) to set up a new instance or upgrade your existing one.
It doesn't matter how you install or run Gitea, as long as its version is 1.19.0 or higher.
Actions are disabled by default, so you need to add the following to the configuration file to enable them:
Since 1.21.0, Actions are enabled by default. If you are using a version before 1.21.0, you need to add the following to the configuration file to enable them:
```ini
[actions]

View File

@ -198,7 +198,7 @@ administrative user.
field is set to `mail.com`, then Gitea will expect the `user email` field
for an authenticated GIT instance to be `gituser@mail.com`.[^2]
**Note**: PAM support is added via [build-time flags](installation/install-from-source.md#build),
**Note**: PAM support is added via [build-time flags](installation/from-source.md#build),
and the official binaries provided do not have this enabled. PAM requires that
the necessary libpam dynamic library be available and the necessary PAM
development headers be accessible to the compiler.

View File

@ -162,7 +162,7 @@ PAM provides a mechanism to automatically add users by authenticating them via PAM
- PAM email domain: the email suffix to append when authenticating users. For example, if the login system expects a user named gituser,
and this field is set to mail.com, then Gitea will expect the user email field for an authenticated GIT instance to be gituser@mail.com[^2].
**Note**: PAM support is added via [build-time flags](installation/install-from-source.md#build),
**Note**: PAM support is added via [build-time flags](installation/from-source.md#build),
and the official binaries provided do not have this enabled by default. PAM requires that the necessary libpam dynamic library be available on the system and that the necessary PAM development headers be accessible to the compiler.
[^1]: For example, on Debian "Bullseye" using standard Linux login, `common-session-noninteractive` can be used. This value, for other versions of Debian

View File

@ -14,6 +14,8 @@ import (
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
)
// ArtifactStatus is the status of an artifact, uploading, expired or need-delete
@ -108,29 +110,37 @@ func UpdateArtifactByID(ctx context.Context, id int64, art *ActionArtifact) erro
return err
}
// ListArtifactsByRunID returns all artifacts of a run
func ListArtifactsByRunID(ctx context.Context, runID int64) ([]*ActionArtifact, error) {
arts := make([]*ActionArtifact, 0, 10)
return arts, db.GetEngine(ctx).Where("run_id=?", runID).Find(&arts)
type FindArtifactsOptions struct {
db.ListOptions
RepoID int64
RunID int64
ArtifactName string
Status int
}
// ListArtifactsByRunIDAndArtifactName returns an artifacts of a run by artifact name
func ListArtifactsByRunIDAndArtifactName(ctx context.Context, runID int64, artifactName string) ([]*ActionArtifact, error) {
arts := make([]*ActionArtifact, 0, 10)
return arts, db.GetEngine(ctx).Where("run_id=? AND artifact_name=?", runID, artifactName).Find(&arts)
}
func (opts FindArtifactsOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
}
if opts.RunID > 0 {
cond = cond.And(builder.Eq{"run_id": opts.RunID})
}
if opts.ArtifactName != "" {
cond = cond.And(builder.Eq{"artifact_name": opts.ArtifactName})
}
if opts.Status > 0 {
cond = cond.And(builder.Eq{"status": opts.Status})
}
// ListUploadedArtifactsByRunID returns all uploaded artifacts of a run
func ListUploadedArtifactsByRunID(ctx context.Context, runID int64) ([]*ActionArtifact, error) {
arts := make([]*ActionArtifact, 0, 10)
return arts, db.GetEngine(ctx).Where("run_id=? AND status=?", runID, ArtifactStatusUploadConfirmed).Find(&arts)
return cond
}
// ActionArtifactMeta is the meta data of an artifact
type ActionArtifactMeta struct {
ArtifactName string
FileSize int64
Status int64
Status ArtifactStatus
}
// ListUploadedArtifactsMeta returns all uploaded artifacts meta of a run
@ -143,18 +153,6 @@ func ListUploadedArtifactsMeta(ctx context.Context, runID int64) ([]*ActionArtif
Find(&arts)
}
// ListArtifactsByRepoID returns all artifacts of a repo
func ListArtifactsByRepoID(ctx context.Context, repoID int64) ([]*ActionArtifact, error) {
arts := make([]*ActionArtifact, 0, 10)
return arts, db.GetEngine(ctx).Where("repo_id=?", repoID).Find(&arts)
}
// ListArtifactsByRunIDAndName returns artifacts by name of a run
func ListArtifactsByRunIDAndName(ctx context.Context, runID int64, name string) ([]*ActionArtifact, error) {
arts := make([]*ActionArtifact, 0, 10)
return arts, db.GetEngine(ctx).Where("run_id=? AND artifact_name=?", runID, name).Find(&arts)
}
// ListNeedExpiredArtifacts returns all need expired artifacts but not deleted
func ListNeedExpiredArtifacts(ctx context.Context) ([]*ActionArtifact, error) {
arts := make([]*ActionArtifact, 0, 10)
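A minimal sketch of how callers can replace the removed `ListArtifactsByRunID`-style helpers with the generic finder and the new `FindArtifactsOptions` (the wrapper function name is illustrative, not part of this commit):

```go
// Illustrative only: list the upload-confirmed artifacts of a run through the
// generic db.Find helper, using the FindArtifactsOptions type added above.
// The int conversion assumes ArtifactStatus has an integer underlying type,
// as implied by the Status int field.
func listConfirmedArtifacts(ctx context.Context, runID int64) ([]*ActionArtifact, error) {
	return db.Find[ActionArtifact](ctx, FindArtifactsOptions{
		RunID:  runID,
		Status: int(ArtifactStatusUploadConfirmed),
	})
}
```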

View File

@ -170,7 +170,7 @@ func updateRepoRunsNumbers(ctx context.Context, repo *repo_model.Repository) err
// CancelRunningJobs cancels all running and waiting jobs associated with a specific workflow.
func CancelRunningJobs(ctx context.Context, repoID int64, ref, workflowID string) error {
// Find all runs in the specified repository, reference, and workflow with statuses 'Running' or 'Waiting'.
runs, total, err := FindRuns(ctx, FindRunOptions{
runs, total, err := db.FindAndCount[ActionRun](ctx, FindRunOptions{
RepoID: repoID,
Ref: ref,
WorkflowID: workflowID,
@ -188,7 +188,7 @@ func CancelRunningJobs(ctx context.Context, repoID int64, ref, workflowID string
// Iterate over each found run and cancel its associated jobs.
for _, run := range runs {
// Find all jobs associated with the current run.
jobs, _, err := FindRunJobs(ctx, FindRunJobOptions{
jobs, err := db.Find[ActionRunJob](ctx, FindRunJobOptions{
RunID: run.ID,
})
if err != nil {

View File

@ -61,7 +61,7 @@ type FindRunJobOptions struct {
UpdatedBefore timeutil.TimeStamp
}
func (opts FindRunJobOptions) toConds() builder.Cond {
func (opts FindRunJobOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.RunID > 0 {
cond = cond.And(builder.Eq{"run_id": opts.RunID})
@ -83,17 +83,3 @@ func (opts FindRunJobOptions) toConds() builder.Cond {
}
return cond
}
func FindRunJobs(ctx context.Context, opts FindRunJobOptions) (ActionJobList, int64, error) {
e := db.GetEngine(ctx).Where(opts.toConds())
if opts.PageSize > 0 && opts.Page >= 1 {
e.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
}
var tasks ActionJobList
total, err := e.FindAndCount(&tasks)
return tasks, total, err
}
func CountRunJobs(ctx context.Context, opts FindRunJobOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toConds()).Count(new(ActionRunJob))
}

View File

@ -75,7 +75,7 @@ type FindRunOptions struct {
Status []Status
}
func (opts FindRunOptions) toConds() builder.Cond {
func (opts FindRunOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
@ -101,18 +101,8 @@ func (opts FindRunOptions) toConds() builder.Cond {
return cond
}
func FindRuns(ctx context.Context, opts FindRunOptions) (RunList, int64, error) {
e := db.GetEngine(ctx).Where(opts.toConds())
if opts.PageSize > 0 && opts.Page >= 1 {
e.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
}
var runs RunList
total, err := e.Desc("id").FindAndCount(&runs)
return runs, total, err
}
func CountRuns(ctx context.Context, opts FindRunOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toConds()).Count(new(ActionRun))
func (opts FindRunOptions) ToOrders() string {
return "`id` DESC"
}
type StatusInfo struct {

View File

@ -156,7 +156,7 @@ type FindRunnerOptions struct {
WithAvailable bool // not only runners belong to, but also runners can be used
}
func (opts FindRunnerOptions) toCond() builder.Cond {
func (opts FindRunnerOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
@ -181,7 +181,7 @@ func (opts FindRunnerOptions) toCond() builder.Cond {
return cond
}
func (opts FindRunnerOptions) toOrder() string {
func (opts FindRunnerOptions) ToOrders() string {
switch opts.Sort {
case "online":
return "last_online DESC"
@ -199,22 +199,6 @@ func (opts FindRunnerOptions) toOrder() string {
return "last_online DESC"
}
func CountRunners(ctx context.Context, opts FindRunnerOptions) (int64, error) {
return db.GetEngine(ctx).
Where(opts.toCond()).
Count(ActionRunner{})
}
func FindRunners(ctx context.Context, opts FindRunnerOptions) (runners RunnerList, err error) {
sess := db.GetEngine(ctx).
Where(opts.toCond()).
OrderBy(opts.toOrder())
if opts.Page > 0 {
sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
}
return runners, sess.Find(&runners)
}
// GetRunnerByUUID returns a runner via uuid
func GetRunnerByUUID(ctx context.Context, uuid string) (*ActionRunner, error) {
var runner ActionRunner
@ -263,8 +247,7 @@ func DeleteRunner(ctx context.Context, id int64) error {
// CreateRunner creates new runner.
func CreateRunner(ctx context.Context, t *ActionRunner) error {
_, err := db.GetEngine(ctx).Insert(t)
return err
return db.Insert(ctx, t)
}
func CountRunnersWithoutBelongingOwner(ctx context.Context) (int64, error) {
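With `FindRunners` and `CountRunners` gone, lookups go through the generic helpers and `FindRunnerOptions`, which now exposes `ToConds`/`ToOrders`. A hedged sketch with an illustrative wrapper name:

```go
// Illustrative only: paged listing of a repository's runners via the generic
// db.FindAndCount helper, replacing the removed FindRunners/CountRunners pair.
func listRepoRunners(ctx context.Context, repoID int64, page, pageSize int) ([]*ActionRunner, int64, error) {
	return db.FindAndCount[ActionRunner](ctx, FindRunnerOptions{
		ListOptions: db.ListOptions{Page: page, PageSize: pageSize},
		RepoID:      repoID,
	})
}
```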

View File

@ -67,7 +67,7 @@ type FindScheduleOptions struct {
OwnerID int64
}
func (opts FindScheduleOptions) toConds() builder.Cond {
func (opts FindScheduleOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
@ -79,16 +79,6 @@ func (opts FindScheduleOptions) toConds() builder.Cond {
return cond
}
func FindSchedules(ctx context.Context, opts FindScheduleOptions) (ScheduleList, int64, error) {
e := db.GetEngine(ctx).Where(opts.toConds())
if !opts.ListAll && opts.PageSize > 0 && opts.Page >= 1 {
e.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
}
var schedules ScheduleList
total, err := e.Desc("id").FindAndCount(&schedules)
return schedules, total, err
}
func CountSchedules(ctx context.Context, opts FindScheduleOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toConds()).Count(new(ActionSchedule))
func (opts FindScheduleOptions) ToOrders() string {
return "`id` DESC"
}

View File

@ -71,7 +71,7 @@ type FindSpecOptions struct {
Next int64
}
func (opts FindSpecOptions) toConds() builder.Cond {
func (opts FindSpecOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
@ -84,23 +84,18 @@ func (opts FindSpecOptions) toConds() builder.Cond {
return cond
}
func (opts FindSpecOptions) ToOrders() string {
return "`id` DESC"
}
func FindSpecs(ctx context.Context, opts FindSpecOptions) (SpecList, int64, error) {
e := db.GetEngine(ctx).Where(opts.toConds())
if opts.PageSize > 0 && opts.Page >= 1 {
e.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
}
var specs SpecList
total, err := e.Desc("id").FindAndCount(&specs)
specs, total, err := db.FindAndCount[ActionScheduleSpec](ctx, opts)
if err != nil {
return nil, 0, err
}
if err := specs.LoadSchedules(ctx); err != nil {
if err := SpecList(specs).LoadSchedules(ctx); err != nil {
return nil, 0, err
}
return specs, total, nil
}
func CountSpecs(ctx context.Context, opts FindSpecOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toConds()).Count(new(ActionScheduleSpec))
}

View File

@ -234,7 +234,7 @@ func CreateTaskForRunner(ctx context.Context, runner *ActionRunner) (*ActionTask
}
var jobs []*ActionRunJob
if err := e.Where("task_id=? AND status=?", 0, StatusWaiting).And(jobCond).Asc("id").Find(&jobs); err != nil {
if err := e.Where("task_id=? AND status=?", 0, StatusWaiting).And(jobCond).Asc("updated", "id").Find(&jobs); err != nil {
return nil, false, err
}

View File

@ -62,7 +62,7 @@ type FindTaskOptions struct {
IDOrderDesc bool
}
func (opts FindTaskOptions) toConds() builder.Cond {
func (opts FindTaskOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
@ -88,18 +88,9 @@ func (opts FindTaskOptions) toConds() builder.Cond {
return cond
}
func FindTasks(ctx context.Context, opts FindTaskOptions) (TaskList, error) {
e := db.GetEngine(ctx).Where(opts.toConds())
if opts.PageSize > 0 && opts.Page >= 1 {
e.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
}
func (opts FindTaskOptions) ToOrders() string {
if opts.IDOrderDesc {
e.OrderBy("id DESC")
return "`id` DESC"
}
var tasks TaskList
return tasks, e.Find(&tasks)
}
func CountTasks(ctx context.Context, opts FindTaskOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toConds()).Count(new(ActionTask))
return ""
}

View File

@ -20,6 +20,10 @@ type ActionTaskOutput struct {
OutputValue string `xorm:"MEDIUMTEXT"`
}
func init() {
db.RegisterModel(new(ActionTaskOutput))
}
// FindTaskOutputByTaskID returns the outputs of the task.
func FindTaskOutputByTaskID(ctx context.Context, taskID int64) ([]*ActionTaskOutput, error) {
var outputs []*ActionTaskOutput

View File

@ -56,7 +56,7 @@ type FindVariablesOpts struct {
RepoID int64
}
func (opts *FindVariablesOpts) toConds() builder.Cond {
func (opts FindVariablesOpts) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.OwnerID > 0 {
cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
@ -67,15 +67,6 @@ func (opts *FindVariablesOpts) toConds() builder.Cond {
return cond
}
func FindVariables(ctx context.Context, opts FindVariablesOpts) ([]*ActionVariable, error) {
var variables []*ActionVariable
sess := db.GetEngine(ctx)
if opts.PageSize != 0 {
sess = db.SetSessionPagination(sess, &opts.ListOptions)
}
return variables, sess.Where(opts.toConds()).Find(&variables)
}
func GetVariableByID(ctx context.Context, variableID int64) (*ActionVariable, error) {
var variable ActionVariable
has, err := db.GetEngine(ctx).Where("id=?", variableID).Get(&variable)
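Since `FindVariables` is removed, a caller that previously listed an owner's variables would now use the generic finder with `FindVariablesOpts`; a small sketch (the wrapper name is hypothetical):

```go
// Illustrative only: fetch all variables of an owner through db.Find and the
// FindVariablesOpts type whose ToConds method is shown above.
func findOwnerVariables(ctx context.Context, ownerID int64) ([]*ActionVariable, error) {
	return db.Find[ActionVariable](ctx, FindVariablesOpts{OwnerID: ownerID})
}
```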

View File

@ -22,7 +22,6 @@ import (
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
"xorm.io/xorm"
)
type (
@ -93,7 +92,7 @@ type FindNotificationOptions struct {
}
// ToCond will convert each condition into a xorm-Cond
func (opts *FindNotificationOptions) ToCond() builder.Cond {
func (opts FindNotificationOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.UserID != 0 {
cond = cond.And(builder.Eq{"notification.user_id": opts.UserID})
@ -105,7 +104,11 @@ func (opts *FindNotificationOptions) ToCond() builder.Cond {
cond = cond.And(builder.Eq{"notification.issue_id": opts.IssueID})
}
if len(opts.Status) > 0 {
cond = cond.And(builder.In("notification.status", opts.Status))
if len(opts.Status) == 1 {
cond = cond.And(builder.Eq{"notification.status": opts.Status[0]})
} else {
cond = cond.And(builder.In("notification.status", opts.Status))
}
}
if len(opts.Source) > 0 {
cond = cond.And(builder.In("notification.source", opts.Source))
@ -119,24 +122,8 @@ func (opts *FindNotificationOptions) ToCond() builder.Cond {
return cond
}
// ToSession will convert the given options to a xorm Session by using the conditions from ToCond and joining with issue table if required
func (opts *FindNotificationOptions) ToSession(ctx context.Context) *xorm.Session {
sess := db.GetEngine(ctx).Where(opts.ToCond())
if opts.Page != 0 {
sess = db.SetSessionPagination(sess, opts)
}
return sess
}
// GetNotifications returns all notifications that fit to the given options.
func GetNotifications(ctx context.Context, options *FindNotificationOptions) (nl NotificationList, err error) {
err = options.ToSession(ctx).OrderBy("notification.updated_unix DESC").Find(&nl)
return nl, err
}
// CountNotifications count all notifications that fit to the given options and ignore pagination.
func CountNotifications(ctx context.Context, opts *FindNotificationOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.ToCond()).Count(&Notification{})
func (opts FindNotificationOptions) ToOrders() string {
return "notification.updated_unix DESC"
}
// CreateRepoTransferNotification creates notification for the user a repository was transferred to
@ -192,7 +179,9 @@ func CreateOrUpdateIssueNotifications(ctx context.Context, issueID, commentID, n
func createOrUpdateIssueNotifications(ctx context.Context, issueID, commentID, notificationAuthorID, receiverID int64) error {
// init
var toNotify container.Set[int64]
notifications, err := getNotificationsByIssueID(ctx, issueID)
notifications, err := db.Find[Notification](ctx, FindNotificationOptions{
IssueID: issueID,
})
if err != nil {
return err
}
@ -273,23 +262,6 @@ func createOrUpdateIssueNotifications(ctx context.Context, issueID, commentID, n
return nil
}
func getNotificationsByIssueID(ctx context.Context, issueID int64) (notifications []*Notification, err error) {
err = db.GetEngine(ctx).
Where("issue_id = ?", issueID).
Find(&notifications)
return notifications, err
}
func notificationExists(notifications []*Notification, issueID, userID int64) bool {
for _, notification := range notifications {
if notification.IssueID == issueID && notification.UserID == userID {
return true
}
}
return false
}
func createIssueNotification(ctx context.Context, userID int64, issue *issues_model.Issue, commentID, updatedByID int64) error {
notification := &Notification{
UserID: userID,
@ -341,35 +313,6 @@ func GetIssueNotification(ctx context.Context, userID, issueID int64) (*Notifica
return notification, err
}
// NotificationsForUser returns notifications for a given user and status
func NotificationsForUser(ctx context.Context, user *user_model.User, statuses []NotificationStatus, page, perPage int) (notifications NotificationList, err error) {
if len(statuses) == 0 {
return nil, nil
}
sess := db.GetEngine(ctx).
Where("user_id = ?", user.ID).
In("status", statuses).
OrderBy("updated_unix DESC")
if page > 0 && perPage > 0 {
sess.Limit(perPage, (page-1)*perPage)
}
err = sess.Find(&notifications)
return notifications, err
}
// CountUnread count unread notifications for a user
func CountUnread(ctx context.Context, userID int64) int64 {
exist, err := db.GetEngine(ctx).Where("user_id = ?", userID).And("status = ?", NotificationStatusUnread).Count(new(Notification))
if err != nil {
log.Error("countUnread", err)
return 0
}
return exist
}
// LoadAttributes load Repo Issue User and Comment if not loaded
func (n *Notification) LoadAttributes(ctx context.Context) (err error) {
if err = n.loadRepo(ctx); err != nil {
@ -481,17 +424,47 @@ func (n *Notification) APIURL() string {
return setting.AppURL + "api/v1/notifications/threads/" + strconv.FormatInt(n.ID, 10)
}
func notificationExists(notifications []*Notification, issueID, userID int64) bool {
for _, notification := range notifications {
if notification.IssueID == issueID && notification.UserID == userID {
return true
}
}
return false
}
// UserIDCount is a simple coalition of UserID and Count
type UserIDCount struct {
UserID int64
Count int64
}
// GetUIDsAndNotificationCounts between the two provided times
func GetUIDsAndNotificationCounts(ctx context.Context, since, until timeutil.TimeStamp) ([]UserIDCount, error) {
sql := `SELECT user_id, count(*) AS count FROM notification ` +
`WHERE user_id IN (SELECT user_id FROM notification WHERE updated_unix >= ? AND ` +
`updated_unix < ?) AND status = ? GROUP BY user_id`
var res []UserIDCount
return res, db.GetEngine(ctx).SQL(sql, since, until, NotificationStatusUnread).Find(&res)
}
// NotificationList contains a list of notifications
type NotificationList []*Notification
// LoadAttributes load Repo Issue User and Comment if not loaded
func (nl NotificationList) LoadAttributes(ctx context.Context) error {
var err error
for i := 0; i < len(nl); i++ {
err = nl[i].LoadAttributes(ctx)
if err != nil && !issues_model.IsErrCommentNotExist(err) {
return err
}
if _, _, err := nl.LoadRepos(ctx); err != nil {
return err
}
if _, err := nl.LoadIssues(ctx); err != nil {
return err
}
if _, err := nl.LoadUsers(ctx); err != nil {
return err
}
if _, err := nl.LoadComments(ctx); err != nil {
return err
}
return nil
}
@ -665,6 +638,68 @@ func (nl NotificationList) getPendingCommentIDs() []int64 {
return ids.Values()
}
func (nl NotificationList) getUserIDs() []int64 {
ids := make(container.Set[int64], len(nl))
for _, notification := range nl {
if notification.UserID == 0 || notification.User != nil {
continue
}
ids.Add(notification.UserID)
}
return ids.Values()
}
// LoadUsers loads users from database
func (nl NotificationList) LoadUsers(ctx context.Context) ([]int, error) {
if len(nl) == 0 {
return []int{}, nil
}
userIDs := nl.getUserIDs()
users := make(map[int64]*user_model.User, len(userIDs))
left := len(userIDs)
for left > 0 {
limit := db.DefaultMaxInSize
if left < limit {
limit = left
}
rows, err := db.GetEngine(ctx).
In("id", userIDs[:limit]).
Rows(new(user_model.User))
if err != nil {
return nil, err
}
for rows.Next() {
var user user_model.User
err = rows.Scan(&user)
if err != nil {
rows.Close()
return nil, err
}
users[user.ID] = &user
}
_ = rows.Close()
left -= limit
userIDs = userIDs[limit:]
}
failures := []int{}
for i, notification := range nl {
if notification.UserID > 0 && notification.User == nil && users[notification.UserID] != nil {
notification.User = users[notification.UserID]
if notification.User == nil {
log.Error("Notification[%d]: UserID[%d] failed to load", notification.ID, notification.UserID)
failures = append(failures, i)
continue
}
}
}
return failures, nil
}
// LoadComments loads comments from database
func (nl NotificationList) LoadComments(ctx context.Context) ([]int, error) {
if len(nl) == 0 {
@ -717,30 +752,6 @@ func (nl NotificationList) LoadComments(ctx context.Context) ([]int, error) {
return failures, nil
}
// GetNotificationCount returns the notification count for user
func GetNotificationCount(ctx context.Context, user *user_model.User, status NotificationStatus) (count int64, err error) {
count, err = db.GetEngine(ctx).
Where("user_id = ?", user.ID).
And("status = ?", status).
Count(&Notification{})
return count, err
}
// UserIDCount is a simple coalition of UserID and Count
type UserIDCount struct {
UserID int64
Count int64
}
// GetUIDsAndNotificationCounts between the two provided times
func GetUIDsAndNotificationCounts(ctx context.Context, since, until timeutil.TimeStamp) ([]UserIDCount, error) {
sql := `SELECT user_id, count(*) AS count FROM notification ` +
`WHERE user_id IN (SELECT user_id FROM notification WHERE updated_unix >= ? AND ` +
`updated_unix < ?) AND status = ? GROUP BY user_id`
var res []UserIDCount
return res, db.GetEngine(ctx).SQL(sql, since, until, NotificationStatusUnread).Find(&res)
}
// SetIssueReadBy sets issue to be read by given user.
func SetIssueReadBy(ctx context.Context, issueID, userID int64) error {
if err := issues_model.UpdateIssueUserByRead(ctx, userID, issueID); err != nil {

View File

@ -34,8 +34,13 @@ func TestCreateOrUpdateIssueNotifications(t *testing.T) {
func TestNotificationsForUser(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
statuses := []activities_model.NotificationStatus{activities_model.NotificationStatusRead, activities_model.NotificationStatusUnread}
notfs, err := activities_model.NotificationsForUser(db.DefaultContext, user, statuses, 1, 10)
notfs, err := db.Find[activities_model.Notification](db.DefaultContext, activities_model.FindNotificationOptions{
UserID: user.ID,
Status: []activities_model.NotificationStatus{
activities_model.NotificationStatusRead,
activities_model.NotificationStatusUnread,
},
})
assert.NoError(t, err)
if assert.Len(t, notfs, 3) {
assert.EqualValues(t, 5, notfs[0].ID)
@ -68,11 +73,21 @@ func TestNotification_GetIssue(t *testing.T) {
func TestGetNotificationCount(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
cnt, err := activities_model.GetNotificationCount(db.DefaultContext, user, activities_model.NotificationStatusRead)
cnt, err := db.Count[activities_model.Notification](db.DefaultContext, activities_model.FindNotificationOptions{
UserID: user.ID,
Status: []activities_model.NotificationStatus{
activities_model.NotificationStatusRead,
},
})
assert.NoError(t, err)
assert.EqualValues(t, 0, cnt)
cnt, err = activities_model.GetNotificationCount(db.DefaultContext, user, activities_model.NotificationStatusUnread)
cnt, err = db.Count[activities_model.Notification](db.DefaultContext, activities_model.FindNotificationOptions{
UserID: user.ID,
Status: []activities_model.NotificationStatus{
activities_model.NotificationStatusUnread,
},
})
assert.NoError(t, err)
assert.EqualValues(t, 1, cnt)
}

View File

@ -52,7 +52,7 @@ type IssueByRepositoryCount struct {
func GetStatistic(ctx context.Context) (stats Statistic) {
e := db.GetEngine(ctx)
stats.Counter.User = user_model.CountUsers(ctx, nil)
stats.Counter.Org, _ = organization.CountOrgs(ctx, organization.FindOrgOptions{IncludePrivate: true})
stats.Counter.Org, _ = db.Count[organization.Organization](ctx, organization.FindOrgOptions{IncludePrivate: true})
stats.Counter.PublicKey, _ = e.Count(new(asymkey_model.PublicKey))
stats.Counter.Repo, _ = repo_model.CountRepositories(ctx, repo_model.CountRepositoryOptions{})
stats.Counter.Watch, _ = e.Count(new(repo_model.Watch))
@ -102,7 +102,7 @@ func GetStatistic(ctx context.Context) (stats Statistic) {
stats.Counter.Follow, _ = e.Count(new(user_model.Follow))
stats.Counter.Mirror, _ = e.Count(new(repo_model.Mirror))
stats.Counter.Release, _ = e.Count(new(repo_model.Release))
stats.Counter.AuthSource = auth.CountSources(ctx, auth.FindSourcesOptions{})
stats.Counter.AuthSource, _ = db.Count[auth.Source](ctx, auth.FindSourcesOptions{})
stats.Counter.Webhook, _ = e.Count(new(webhook.Webhook))
stats.Counter.Milestone, _ = e.Count(new(issues_model.Milestone))
stats.Counter.Label, _ = e.Count(new(issues_model.Label))

View File

@ -92,10 +92,9 @@ func CountUserGPGKeys(ctx context.Context, userID int64) (int64, error) {
return db.GetEngine(ctx).Where("owner_id=? AND primary_key_id=''", userID).Count(&GPGKey{})
}
// GetGPGKeyByID returns public key by given ID.
func GetGPGKeyByID(ctx context.Context, keyID int64) (*GPGKey, error) {
func GetGPGKeyForUserByID(ctx context.Context, ownerID, keyID int64) (*GPGKey, error) {
key := new(GPGKey)
has, err := db.GetEngine(ctx).ID(keyID).Get(key)
has, err := db.GetEngine(ctx).Where("id=? AND owner_id=?", keyID, ownerID).Get(key)
if err != nil {
return nil, err
} else if !has {
@ -225,7 +224,7 @@ func deleteGPGKey(ctx context.Context, keyID string) (int64, error) {
// DeleteGPGKey deletes GPG key information in database.
func DeleteGPGKey(ctx context.Context, doer *user_model.User, id int64) (err error) {
key, err := GetGPGKeyByID(ctx, id)
key, err := GetGPGKeyForUserByID(ctx, doer.ID, id)
if err != nil {
if IsErrGPGKeyNotExist(err) {
return nil
@ -233,11 +232,6 @@ func DeleteGPGKey(ctx context.Context, doer *user_model.User, id int64) (err err
return fmt.Errorf("GetPublicKeyByID: %w", err)
}
// Check if user has access to delete this key.
if !doer.IsAdmin && doer.ID != key.OwnerID {
return ErrGPGKeyAccessDenied{doer.ID, key.ID}
}
ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err

View File

@ -179,45 +179,33 @@ func SearchPublicKeyByContentExact(ctx context.Context, content string) (*Public
return key, nil
}
// SearchPublicKey returns a list of public keys matching the provided arguments.
func SearchPublicKey(ctx context.Context, uid int64, fingerprint string) ([]*PublicKey, error) {
keys := make([]*PublicKey, 0, 5)
type FindPublicKeyOptions struct {
db.ListOptions
OwnerID int64
Fingerprint string
KeyTypes []KeyType
NotKeytype KeyType
LoginSourceID int64
}
func (opts FindPublicKeyOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if uid != 0 {
cond = cond.And(builder.Eq{"owner_id": uid})
if opts.OwnerID > 0 {
cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
}
if fingerprint != "" {
cond = cond.And(builder.Eq{"fingerprint": fingerprint})
if opts.Fingerprint != "" {
cond = cond.And(builder.Eq{"fingerprint": opts.Fingerprint})
}
return keys, db.GetEngine(ctx).Where(cond).Find(&keys)
}
// ListPublicKeys returns a list of public keys belongs to given user.
func ListPublicKeys(ctx context.Context, uid int64, listOptions db.ListOptions) ([]*PublicKey, error) {
sess := db.GetEngine(ctx).Where("owner_id = ? AND type != ?", uid, KeyTypePrincipal)
if listOptions.Page != 0 {
sess = db.SetSessionPagination(sess, &listOptions)
keys := make([]*PublicKey, 0, listOptions.PageSize)
return keys, sess.Find(&keys)
if len(opts.KeyTypes) > 0 {
cond = cond.And(builder.In("type", opts.KeyTypes))
}
keys := make([]*PublicKey, 0, 5)
return keys, sess.Find(&keys)
}
// CountPublicKeys count public keys a user has
func CountPublicKeys(ctx context.Context, userID int64) (int64, error) {
sess := db.GetEngine(ctx).Where("owner_id = ? AND type != ?", userID, KeyTypePrincipal)
return sess.Count(&PublicKey{})
}
// ListPublicKeysBySource returns a list of synchronized public keys for a given user and login source.
func ListPublicKeysBySource(ctx context.Context, uid, authSourceID int64) ([]*PublicKey, error) {
keys := make([]*PublicKey, 0, 5)
return keys, db.GetEngine(ctx).
Where("owner_id = ? AND login_source_id = ?", uid, authSourceID).
Find(&keys)
if opts.NotKeytype > 0 {
cond = cond.And(builder.Neq{"type": opts.NotKeytype})
}
if opts.LoginSourceID > 0 {
cond = cond.And(builder.Eq{"login_source_id": opts.LoginSourceID})
}
return cond
}
// UpdatePublicKeyUpdated updates public key use time.
@ -394,7 +382,10 @@ func SynchronizePublicKeys(ctx context.Context, usr *user_model.User, s *auth.So
// Get Public Keys from DB with current LDAP source
var giteaKeys []string
keys, err := ListPublicKeysBySource(ctx, usr.ID, s.ID)
keys, err := db.Find[PublicKey](ctx, FindPublicKeyOptions{
OwnerID: usr.ID,
LoginSourceID: s.ID,
})
if err != nil {
log.Error("synchronizePublicKeys[%s]: Error listing Public SSH Keys for user %s: %v", s.Name, usr.Name, err)
}

View File

@ -21,7 +21,10 @@ import (
func ParseCommitWithSSHSignature(ctx context.Context, c *git.Commit, committer *user_model.User) *CommitVerification {
// Now try to associate the signature with the committer, if present
if committer.ID != 0 {
keys, err := ListPublicKeys(ctx, committer.ID, db.ListOptions{})
keys, err := db.Find[PublicKey](ctx, FindPublicKeyOptions{
OwnerID: committer.ID,
NotKeytype: KeyTypePrincipal,
})
if err != nil { // Skipping failed to get ssh keys of user
log.Error("ListPublicKeys: %v", err)
return &CommitVerification{

View File

@ -210,7 +210,7 @@ type ListDeployKeysOptions struct {
Fingerprint string
}
func (opt ListDeployKeysOptions) toCond() builder.Cond {
func (opt ListDeployKeysOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opt.RepoID != 0 {
cond = cond.And(builder.Eq{"repo_id": opt.RepoID})
@ -223,23 +223,3 @@ func (opt ListDeployKeysOptions) toCond() builder.Cond {
}
return cond
}
// ListDeployKeys returns a list of deploy keys matching the provided arguments.
func ListDeployKeys(ctx context.Context, opts *ListDeployKeysOptions) ([]*DeployKey, error) {
sess := db.GetEngine(ctx).Where(opts.toCond())
if opts.Page != 0 {
sess = db.SetSessionPagination(sess, opts)
keys := make([]*DeployKey, 0, opts.PageSize)
return keys, sess.Find(&keys)
}
keys := make([]*DeployKey, 0, 5)
return keys, sess.Find(&keys)
}
// CountDeployKeys returns count deploy keys matching the provided arguments.
func CountDeployKeys(ctx context.Context, opts *ListDeployKeysOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toCond()).Count(&DeployKey{})
}
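The removed `ListDeployKeys`/`CountDeployKeys` wrappers are likewise replaced by the generic helpers; a minimal sketch using the `ListDeployKeysOptions` type above (the wrapper name is illustrative):

```go
// Illustrative only: list a repository's deploy keys via db.Find, relying on
// the ToConds method that ListDeployKeysOptions now implements.
func listRepoDeployKeys(ctx context.Context, repoID int64) ([]*DeployKey, error) {
	return db.Find[DeployKey](ctx, ListDeployKeysOptions{RepoID: repoID})
}
```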

View File

@ -17,6 +17,7 @@ import (
"code.gitea.io/gitea/modules/util"
lru "github.com/hashicorp/golang-lru/v2"
"xorm.io/builder"
)
// ErrAccessTokenNotExist represents a "AccessTokenNotExist" kind of error.
@ -201,25 +202,18 @@ type ListAccessTokensOptions struct {
UserID int64
}
// ListAccessTokens returns a list of access tokens belongs to given user.
func ListAccessTokens(ctx context.Context, opts ListAccessTokensOptions) ([]*AccessToken, error) {
sess := db.GetEngine(ctx).Where("uid=?", opts.UserID)
if len(opts.Name) != 0 {
sess = sess.Where("name=?", opts.Name)
func (opts ListAccessTokensOptions) ToConds() builder.Cond {
cond := builder.NewCond()
// user id is required, otherwise it would return all results, which may be a possible bug
cond = cond.And(builder.Eq{"uid": opts.UserID})
if len(opts.Name) > 0 {
cond = cond.And(builder.Eq{"name": opts.Name})
}
return cond
}
sess = sess.Desc("created_unix")
if opts.Page != 0 {
sess = db.SetSessionPagination(sess, &opts)
tokens := make([]*AccessToken, 0, opts.PageSize)
return tokens, sess.Find(&tokens)
}
tokens := make([]*AccessToken, 0, 5)
return tokens, sess.Find(&tokens)
func (opts ListAccessTokensOptions) ToOrders() string {
return "created_unix DESC"
}
// UpdateAccessToken updates information of access token.
@ -228,15 +222,6 @@ func UpdateAccessToken(ctx context.Context, t *AccessToken) error {
return err
}
// CountAccessTokens count access tokens belongs to given user by options
func CountAccessTokens(ctx context.Context, opts ListAccessTokensOptions) (int64, error) {
sess := db.GetEngine(ctx).Where("uid=?", opts.UserID)
if len(opts.Name) != 0 {
sess = sess.Where("name=?", opts.Name)
}
return sess.Count(&AccessToken{})
}
// DeleteAccessTokenByID deletes access token by given ID.
func DeleteAccessTokenByID(ctx context.Context, id, userID int64) error {
cnt, err := db.GetEngine(ctx).ID(id).Delete(&AccessToken{

View File

@ -85,7 +85,7 @@ func TestGetAccessTokenBySHA(t *testing.T) {
func TestListAccessTokens(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
tokens, err := auth_model.ListAccessTokens(db.DefaultContext, auth_model.ListAccessTokensOptions{UserID: 1})
tokens, err := db.Find[auth_model.AccessToken](db.DefaultContext, auth_model.ListAccessTokensOptions{UserID: 1})
assert.NoError(t, err)
if assert.Len(t, tokens, 2) {
assert.Equal(t, int64(1), tokens[0].UID)
@ -94,14 +94,14 @@ func TestListAccessTokens(t *testing.T) {
assert.Contains(t, []string{tokens[0].Name, tokens[1].Name}, "Token B")
}
tokens, err = auth_model.ListAccessTokens(db.DefaultContext, auth_model.ListAccessTokensOptions{UserID: 2})
tokens, err = db.Find[auth_model.AccessToken](db.DefaultContext, auth_model.ListAccessTokensOptions{UserID: 2})
assert.NoError(t, err)
if assert.Len(t, tokens, 1) {
assert.Equal(t, int64(2), tokens[0].UID)
assert.Equal(t, "Token A", tokens[0].Name)
}
tokens, err = auth_model.ListAccessTokens(db.DefaultContext, auth_model.ListAccessTokensOptions{UserID: 100})
tokens, err = db.Find[auth_model.AccessToken](db.DefaultContext, auth_model.ListAccessTokensOptions{UserID: 100})
assert.NoError(t, err)
assert.Empty(t, tokens)
}

View File

@ -5,6 +5,7 @@ package auth
import (
"context"
"crypto/sha256"
"encoding/base32"
"encoding/base64"
"fmt"
@ -19,7 +20,6 @@ import (
"code.gitea.io/gitea/modules/util"
uuid "github.com/google/uuid"
"github.com/minio/sha256-simd"
"golang.org/x/crypto/bcrypt"
"xorm.io/builder"
"xorm.io/xorm"
@ -243,13 +243,6 @@ func GetOAuth2ApplicationByID(ctx context.Context, id int64) (app *OAuth2Applica
return app, nil
}
// GetOAuth2ApplicationsByUserID returns all oauth2 applications owned by the user
func GetOAuth2ApplicationsByUserID(ctx context.Context, userID int64) (apps []*OAuth2Application, err error) {
apps = make([]*OAuth2Application, 0)
err = db.GetEngine(ctx).Where("uid = ?", userID).Find(&apps)
return apps, err
}
// CreateOAuth2ApplicationOptions holds options to create an oauth2 application
type CreateOAuth2ApplicationOptions struct {
Name string
@ -372,25 +365,6 @@ func DeleteOAuth2Application(ctx context.Context, id, userid int64) error {
return committer.Commit()
}
// ListOAuth2Applications returns a list of oauth2 applications belongs to given user.
func ListOAuth2Applications(ctx context.Context, uid int64, listOptions db.ListOptions) ([]*OAuth2Application, int64, error) {
sess := db.GetEngine(ctx).
Where("uid=?", uid).
Desc("id")
if listOptions.Page != 0 {
sess = db.SetSessionPagination(sess, &listOptions)
apps := make([]*OAuth2Application, 0, listOptions.PageSize)
total, err := sess.FindAndCount(&apps)
return apps, total, err
}
apps := make([]*OAuth2Application, 0, 5)
total, err := sess.FindAndCount(&apps)
return apps, total, err
}
//////////////////////////////////////////////////////
// OAuth2AuthorizationCode is a code to obtain an access token in combination with the client secret once. It has a limited lifetime.

View File

@ -0,0 +1,32 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package auth
import (
"code.gitea.io/gitea/models/db"
"xorm.io/builder"
)
type FindOAuth2ApplicationsOptions struct {
db.ListOptions
// OwnerID is the user id or org id of the owner of the application
OwnerID int64
// find global applications, if true, then OwnerID will be ignored
IsGlobal bool
}
func (opts FindOAuth2ApplicationsOptions) ToConds() builder.Cond {
conds := builder.NewCond()
if opts.IsGlobal {
conds = conds.And(builder.Eq{"uid": 0})
} else if opts.OwnerID != 0 {
conds = conds.And(builder.Eq{"uid": opts.OwnerID})
}
return conds
}
func (opts FindOAuth2ApplicationsOptions) ToOrders() string {
return "id DESC"
}
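
A minimal usage sketch, not part of this diff and assuming the import aliases used in the tests above: the new options type is meant to be consumed through the generic db.Find helper rather than a bespoke list function.

package example

import (
	"context"

	auth_model "code.gitea.io/gitea/models/auth"
	"code.gitea.io/gitea/models/db"
)

// listUserApps lists the OAuth2 applications owned by a user via the
// generic finder; ToConds/ToOrders above supply the filter and ordering.
func listUserApps(ctx context.Context, ownerID int64) ([]*auth_model.OAuth2Application, error) {
	return db.Find[auth_model.OAuth2Application](ctx, auth_model.FindOAuth2ApplicationsOptions{
		OwnerID: ownerID,
	})
}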

View File

@ -242,6 +242,7 @@ func CreateSource(ctx context.Context, source *Source) error {
}
type FindSourcesOptions struct {
db.ListOptions
IsActive util.OptionalBool
LoginType Type
}
@ -257,27 +258,22 @@ func (opts FindSourcesOptions) ToConds() builder.Cond {
return conds
}
// FindSources returns a slice of login sources found in DB according to given conditions.
func FindSources(ctx context.Context, opts FindSourcesOptions) ([]*Source, error) {
auths := make([]*Source, 0, 6)
return auths, db.GetEngine(ctx).Where(opts.ToConds()).Find(&auths)
}
// IsSSPIEnabled returns true if there is at least one activated login
// source of type LoginSSPI
func IsSSPIEnabled(ctx context.Context) bool {
if !db.HasEngine {
return false
}
sources, err := FindSources(ctx, FindSourcesOptions{
exist, err := db.Exists[Source](ctx, FindSourcesOptions{
IsActive: util.OptionalBoolTrue,
LoginType: SSPI,
})
if err != nil {
log.Error("ActiveSources: %v", err)
log.Error("Active SSPI Sources: %v", err)
return false
}
return len(sources) > 0
return exist
}
// GetSourceByID returns login source by given ID.
@ -346,12 +342,6 @@ func UpdateSource(ctx context.Context, source *Source) error {
return err
}
// CountSources returns number of login sources.
func CountSources(ctx context.Context, opts FindSourcesOptions) int64 {
count, _ := db.GetEngine(ctx).Where(opts.ToConds()).Count(new(Source))
return count
}
// ErrSourceNotExist represents a "SourceNotExist" kind of error.
type ErrSourceNotExist struct {
ID int64

View File

@ -264,3 +264,8 @@ func inTransaction(ctx context.Context) (*xorm.Session, bool) {
return nil, false
}
}
func Exists[T any](ctx context.Context, opts FindOptions) (bool, error) {
var bean T
return GetEngine(ctx).Where(opts.ToConds()).Exist(&bean)
}
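
A minimal usage sketch, not part of this diff, mirroring the IsSSPIEnabled change above: an existence check goes through the generic helper instead of loading the full result set just to test its length.

package example

import (
	"context"

	auth_model "code.gitea.io/gitea/models/auth"
	"code.gitea.io/gitea/models/db"
	"code.gitea.io/gitea/modules/util"
)

// hasActiveSSPISource reports whether at least one active SSPI login source exists.
func hasActiveSSPISource(ctx context.Context) (bool, error) {
	return db.Exists[auth_model.Source](ctx, auth_model.FindSourcesOptions{
		IsActive:  util.OptionalBoolTrue,
		LoginType: auth_model.SSPI,
	})
}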

View File

@ -14,7 +14,8 @@ import (
const (
// DefaultMaxInSize represents default variables number on IN () in SQL
DefaultMaxInSize = 50
DefaultMaxInSize = 50
defaultFindSliceSize = 10
)
// Paginator is the base for different ListOptions types
@ -52,7 +53,12 @@ type ListOptions struct {
ListAll bool // if true, then PageSize and Page will not be taken
}
var _ Paginator = &ListOptions{}
var ListOptionsAll = ListOptions{ListAll: true}
var (
_ Paginator = &ListOptions{}
_ FindOptions = ListOptions{}
)
// GetSkipTake returns the skip and take values
func (opts *ListOptions) GetSkipTake() (skip, take int) {
@ -67,8 +73,16 @@ func (opts *ListOptions) GetStartEnd() (start, end int) {
return start, end
}
func (opts ListOptions) GetPage() int {
return opts.Page
}
func (opts ListOptions) GetPageSize() int {
return opts.PageSize
}
// IsListAll indicates PageSize and Page will be ignored
func (opts *ListOptions) IsListAll() bool {
func (opts ListOptions) IsListAll() bool {
return opts.ListAll
}
@ -85,6 +99,10 @@ func (opts *ListOptions) SetDefaultValues() {
}
}
func (opts ListOptions) ToConds() builder.Cond {
return builder.NewCond()
}
// AbsoluteListOptions absolute options to paginate results
type AbsoluteListOptions struct {
skip int
@ -124,29 +142,63 @@ func (opts *AbsoluteListOptions) GetStartEnd() (start, end int) {
// FindOptions represents a find options
type FindOptions interface {
Paginator
GetPage() int
GetPageSize() int
IsListAll() bool
ToConds() builder.Cond
}
type FindOptionsOrder interface {
ToOrders() string
}
// Find represents a common find function which accept an options interface
func Find[T any](ctx context.Context, opts FindOptions, objects *[]T) error {
func Find[T any](ctx context.Context, opts FindOptions) ([]*T, error) {
sess := GetEngine(ctx).Where(opts.ToConds())
if !opts.IsListAll() {
sess.Limit(opts.GetSkipTake())
page, pageSize := opts.GetPage(), opts.GetPageSize()
if !opts.IsListAll() && pageSize > 0 && page >= 1 {
sess.Limit(pageSize, (page-1)*pageSize)
}
return sess.Find(objects)
if newOpt, ok := opts.(FindOptionsOrder); ok && newOpt.ToOrders() != "" {
sess.OrderBy(newOpt.ToOrders())
}
findPageSize := defaultFindSliceSize
if pageSize > 0 {
findPageSize = pageSize
}
objects := make([]*T, 0, findPageSize)
if err := sess.Find(&objects); err != nil {
return nil, err
}
return objects, nil
}
// Count represents a common count function which accept an options interface
func Count[T any](ctx context.Context, opts FindOptions, object T) (int64, error) {
return GetEngine(ctx).Where(opts.ToConds()).Count(object)
func Count[T any](ctx context.Context, opts FindOptions) (int64, error) {
var object T
return GetEngine(ctx).Where(opts.ToConds()).Count(&object)
}
// FindAndCount represents a common findandcount function which accept an options interface
func FindAndCount[T any](ctx context.Context, opts FindOptions, objects *[]T) (int64, error) {
func FindAndCount[T any](ctx context.Context, opts FindOptions) ([]*T, int64, error) {
sess := GetEngine(ctx).Where(opts.ToConds())
if !opts.IsListAll() {
sess.Limit(opts.GetSkipTake())
page, pageSize := opts.GetPage(), opts.GetPageSize()
if !opts.IsListAll() && pageSize > 0 && page >= 1 {
sess.Limit(pageSize, (page-1)*pageSize)
}
return sess.FindAndCount(objects)
if newOpt, ok := opts.(FindOptionsOrder); ok && newOpt.ToOrders() != "" {
sess.OrderBy(newOpt.ToOrders())
}
findPageSize := defaultFindSliceSize
if pageSize > 0 {
findPageSize = pageSize
}
objects := make([]*T, 0, findPageSize)
cnt, err := sess.FindAndCount(&objects)
if err != nil {
return nil, 0, err
}
return objects, cnt, nil
}
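
A minimal call-site sketch, not part of this diff and assuming the fixture types used in the tests below: the result slice is now allocated and returned by the helper instead of being passed in by the caller.

package example

import (
	"context"

	"code.gitea.io/gitea/models/db"
	repo_model "code.gitea.io/gitea/models/repo"
)

// listRepoUnits pages through repo units with the generic helpers; opts is any
// type implementing FindOptions (and optionally FindOptionsOrder for sorting).
func listRepoUnits(ctx context.Context, opts db.FindOptions) ([]*repo_model.RepoUnit, int64, error) {
	units, total, err := db.FindAndCount[repo_model.RepoUnit](ctx, opts)
	if err != nil {
		return nil, 0, err
	}
	return units, total, nil
}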

View File

@ -18,11 +18,11 @@ type mockListOptions struct {
db.ListOptions
}
func (opts *mockListOptions) IsListAll() bool {
func (opts mockListOptions) IsListAll() bool {
return true
}
func (opts *mockListOptions) ToConds() builder.Cond {
func (opts mockListOptions) ToConds() builder.Cond {
return builder.NewCond()
}
@ -37,17 +37,16 @@ func TestFind(t *testing.T) {
assert.NotEmpty(t, repoUnitCount)
opts := mockListOptions{}
var repoUnits []repo_model.RepoUnit
err = db.Find(db.DefaultContext, &opts, &repoUnits)
repoUnits, err := db.Find[repo_model.RepoUnit](db.DefaultContext, opts)
assert.NoError(t, err)
assert.Len(t, repoUnits, repoUnitCount)
cnt, err := db.Count(db.DefaultContext, &opts, new(repo_model.RepoUnit))
cnt, err := db.Count[repo_model.RepoUnit](db.DefaultContext, opts)
assert.NoError(t, err)
assert.EqualValues(t, repoUnitCount, cnt)
repoUnits = make([]repo_model.RepoUnit, 0, 10)
newCnt, err := db.FindAndCount(db.DefaultContext, &opts, &repoUnits)
repoUnits, newCnt, err := db.FindAndCount[repo_model.RepoUnit](db.DefaultContext, opts)
assert.NoError(t, err)
assert.EqualValues(t, cnt, newCnt)
assert.Len(t, repoUnits, repoUnitCount)
}

View File

@ -66,3 +66,12 @@
tree_path: "README.md"
created_unix: 946684812
invalidated: true
-
id: 8
type: 0 # comment
poster_id: 2
issue_id: 4 # in repo_id 2
content: "comment in private pository"
created_unix: 946684811
updated_unix: 946684811

View File

@ -61,7 +61,7 @@
priority: 0
is_closed: true
is_pull: false
num_comments: 0
num_comments: 1
created_unix: 946684830
updated_unix: 978307200
is_locked: false

View File

@ -1024,10 +1024,11 @@ type FindCommentsOptions struct {
Type CommentType
IssueIDs []int64
Invalidated util.OptionalBool
IsPull util.OptionalBool
}
// ToConds implements FindOptions interface
func (opts *FindCommentsOptions) ToConds() builder.Cond {
func (opts FindCommentsOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"issue.repo_id": opts.RepoID})
@ -1058,6 +1059,9 @@ func (opts *FindCommentsOptions) ToConds() builder.Cond {
if !opts.Invalidated.IsNone() {
cond = cond.And(builder.Eq{"comment.invalidated": opts.Invalidated.IsTrue()})
}
if opts.IsPull != util.OptionalBoolNone {
cond = cond.And(builder.Eq{"issue.is_pull": opts.IsPull.IsTrue()})
}
return cond
}
@ -1065,7 +1069,7 @@ func (opts *FindCommentsOptions) ToConds() builder.Cond {
func FindComments(ctx context.Context, opts *FindCommentsOptions) (CommentList, error) {
comments := make([]*Comment, 0, 10)
sess := db.GetEngine(ctx).Where(opts.ToConds())
if opts.RepoID > 0 {
if opts.RepoID > 0 || opts.IsPull != util.OptionalBoolNone {
sess.Join("INNER", "issue", "issue.id = comment.issue_id")
}

View File

@ -218,9 +218,9 @@ func GetIssueContentHistoryByID(dbCtx context.Context, id int64) (*ContentHistor
}
// GetIssueContentHistoryAndPrev get a history and the previous non-deleted history (to compare)
func GetIssueContentHistoryAndPrev(dbCtx context.Context, id int64) (history, prevHistory *ContentHistory, err error) {
func GetIssueContentHistoryAndPrev(dbCtx context.Context, issueID, id int64) (history, prevHistory *ContentHistory, err error) {
history = &ContentHistory{}
has, err := db.GetEngine(dbCtx).ID(id).Get(history)
has, err := db.GetEngine(dbCtx).Where("id=? AND issue_id=?", id, issueID).Get(history)
if err != nil {
log.Error("failed to get issue content history %v. err=%v", id, err)
return nil, nil, err

View File

@ -58,13 +58,13 @@ func TestContentHistory(t *testing.T) {
hasHistory2, _ := issues_model.HasIssueContentHistory(dbCtx, 10, 1)
assert.False(t, hasHistory2)
h6, h6Prev, _ := issues_model.GetIssueContentHistoryAndPrev(dbCtx, 6)
h6, h6Prev, _ := issues_model.GetIssueContentHistoryAndPrev(dbCtx, 10, 6)
assert.EqualValues(t, 6, h6.ID)
assert.EqualValues(t, 5, h6Prev.ID)
// soft-delete
_ = issues_model.SoftDeleteIssueContentHistory(dbCtx, 5)
h6, h6Prev, _ = issues_model.GetIssueContentHistoryAndPrev(dbCtx, 6)
h6, h6Prev, _ = issues_model.GetIssueContentHistoryAndPrev(dbCtx, 10, 6)
assert.EqualValues(t, 6, h6.ID)
assert.EqualValues(t, 4, h6Prev.ID)

View File

@ -32,7 +32,12 @@ func AddGitSizeAndLFSSizeToRepositoryTable(x *xorm.Engine) error {
return err
}
_, err = sess.Exec(`UPDATE repository SET git_size = size - lfs_size`)
_, err = sess.Exec(`UPDATE repository SET size = 0 WHERE size IS NULL`)
if err != nil {
return err
}
_, err = sess.Exec(`UPDATE repository SET git_size = size - lfs_size WHERE size > lfs_size`)
if err != nil {
return err
}

View File

@ -456,7 +456,7 @@ func queryUserOrgIDs(userID int64, includePrivate bool) *builder.Builder {
return builder.Select("org_id").From("org_user").Where(cond)
}
func (opts FindOrgOptions) toConds() builder.Cond {
func (opts FindOrgOptions) ToConds() builder.Cond {
var cond builder.Cond = builder.Eq{"`user`.`type`": user_model.UserTypeOrganization}
if opts.UserID > 0 {
cond = cond.And(builder.In("`user`.`id`", queryUserOrgIDs(opts.UserID, opts.IncludePrivate)))
@ -467,23 +467,8 @@ func (opts FindOrgOptions) toConds() builder.Cond {
return cond
}
// FindOrgs returns a list of organizations according given conditions
func FindOrgs(ctx context.Context, opts FindOrgOptions) ([]*Organization, error) {
orgs := make([]*Organization, 0, 10)
sess := db.GetEngine(ctx).
Where(opts.toConds()).
Asc("`user`.name")
if opts.Page > 0 && opts.PageSize > 0 {
sess.Limit(opts.PageSize, opts.PageSize*(opts.Page-1))
}
return orgs, sess.Find(&orgs)
}
// CountOrgs returns total count organizations according options
func CountOrgs(ctx context.Context, opts FindOrgOptions) (int64, error) {
return db.GetEngine(ctx).
Where(opts.toConds()).
Count(new(Organization))
func (opts FindOrgOptions) ToOrders() string {
return "`user`.name ASC"
}
// HasOrgOrUserVisible tells if the given user can see the given org or user

View File

@ -131,7 +131,7 @@ func TestCountOrganizations(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
expected, err := db.GetEngine(db.DefaultContext).Where("type=?", user_model.UserTypeOrganization).Count(&organization.Organization{})
assert.NoError(t, err)
cnt, err := organization.CountOrgs(db.DefaultContext, organization.FindOrgOptions{IncludePrivate: true})
cnt, err := db.Count[organization.Organization](db.DefaultContext, organization.FindOrgOptions{IncludePrivate: true})
assert.NoError(t, err)
assert.Equal(t, expected, cnt)
}
@ -183,7 +183,7 @@ func TestIsPublicMembership(t *testing.T) {
func TestFindOrgs(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
orgs, err := organization.FindOrgs(db.DefaultContext, organization.FindOrgOptions{
orgs, err := db.Find[organization.Organization](db.DefaultContext, organization.FindOrgOptions{
UserID: 4,
IncludePrivate: true,
})
@ -192,14 +192,14 @@ func TestFindOrgs(t *testing.T) {
assert.EqualValues(t, 3, orgs[0].ID)
}
orgs, err = organization.FindOrgs(db.DefaultContext, organization.FindOrgOptions{
orgs, err = db.Find[organization.Organization](db.DefaultContext, organization.FindOrgOptions{
UserID: 4,
IncludePrivate: false,
})
assert.NoError(t, err)
assert.Len(t, orgs, 0)
total, err := organization.CountOrgs(db.DefaultContext, organization.FindOrgOptions{
total, err := db.Count[organization.Organization](db.DefaultContext, organization.FindOrgOptions{
UserID: 4,
IncludePrivate: true,
})

View File

@ -192,16 +192,16 @@ func IsTypeValid(p Type) bool {
// SearchOptions are options for GetProjects
type SearchOptions struct {
db.ListOptions
OwnerID int64
RepoID int64
Page int
IsClosed util.OptionalBool
OrderBy db.SearchOrderBy
Type Type
Title string
}
func (opts *SearchOptions) toConds() builder.Cond {
func (opts SearchOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
@ -226,9 +226,8 @@ func (opts *SearchOptions) toConds() builder.Cond {
return cond
}
// CountProjects counts projects
func CountProjects(ctx context.Context, opts SearchOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toConds()).Count(new(Project))
func (opts SearchOptions) ToOrders() string {
return opts.OrderBy.String()
}
func GetSearchOrderByBySortType(sortType string) db.SearchOrderBy {
@ -244,22 +243,6 @@ func GetSearchOrderByBySortType(sortType string) db.SearchOrderBy {
}
}
// FindProjects returns a list of all projects that have been created in the repository
func FindProjects(ctx context.Context, opts SearchOptions) ([]*Project, int64, error) {
e := db.GetEngine(ctx).Where(opts.toConds())
if opts.OrderBy.String() != "" {
e = e.OrderBy(opts.OrderBy.String())
}
projects := make([]*Project, 0, setting.UI.IssuePagingNum)
if opts.Page > 0 {
e = e.Limit(setting.UI.IssuePagingNum, (opts.Page-1)*setting.UI.IssuePagingNum)
}
count, err := e.FindAndCount(&projects)
return projects, count, err
}
// NewProject creates a new Project
func NewProject(ctx context.Context, p *Project) error {
if !IsBoardTypeValid(p.BoardType) {
@ -311,6 +294,18 @@ func GetProjectByID(ctx context.Context, id int64) (*Project, error) {
return p, nil
}
// GetProjectForRepoByID returns the project in a repository by given ID
func GetProjectForRepoByID(ctx context.Context, repoID, id int64) (*Project, error) {
p := new(Project)
has, err := db.GetEngine(ctx).Where("id=? AND repo_id=?", id, repoID).Get(p)
if err != nil {
return nil, err
} else if !has {
return nil, ErrProjectNotExist{ID: id}
}
return p, nil
}
// UpdateProject updates project properties
func UpdateProject(ctx context.Context, p *Project) error {
if !IsCardTypeValid(p.CardType) {

View File

@ -34,13 +34,13 @@ func TestIsProjectTypeValid(t *testing.T) {
func TestGetProjects(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
projects, _, err := FindProjects(db.DefaultContext, SearchOptions{RepoID: 1})
projects, err := db.Find[Project](db.DefaultContext, SearchOptions{RepoID: 1})
assert.NoError(t, err)
// 1 value for this repo exists in the fixtures
assert.Len(t, projects, 1)
projects, _, err = FindProjects(db.DefaultContext, SearchOptions{RepoID: 3})
projects, err = db.Find[Project](db.DefaultContext, SearchOptions{RepoID: 3})
assert.NoError(t, err)
// 1 value for this repo exists in the fixtures
@ -109,7 +109,7 @@ func TestProjectsSort(t *testing.T) {
}
for _, tt := range tests {
projects, count, err := FindProjects(db.DefaultContext, SearchOptions{
projects, count, err := db.FindAndCount[Project](db.DefaultContext, SearchOptions{
OrderBy: GetSearchOrderByBySortType(tt.sortType),
})
assert.NoError(t, err)

View File

@ -207,6 +207,21 @@ func GetReleaseByID(ctx context.Context, id int64) (*Release, error) {
return rel, nil
}
// GetReleaseForRepoByID returns release with given ID.
func GetReleaseForRepoByID(ctx context.Context, repoID, id int64) (*Release, error) {
rel := new(Release)
has, err := db.GetEngine(ctx).
Where("id=? AND repo_id=?", id, repoID).
Get(rel)
if err != nil {
return nil, err
} else if !has {
return nil, ErrReleaseNotExist{id, ""}
}
return rel, nil
}
// FindReleasesOptions describes the conditions to Find releases
type FindReleasesOptions struct {
db.ListOptions

View File

@ -584,9 +584,9 @@ func (repo *Repository) DescriptionHTML(ctx context.Context) template.HTML {
}, repo.Description)
if err != nil {
log.Error("Failed to render description for %s (ID: %d): %v", repo.Name, repo.ID, err)
return template.HTML(markup.Sanitize(repo.Description))
return template.HTML(markup.SanitizeDescription(repo.Description))
}
return template.HTML(markup.Sanitize(desc))
return template.HTML(markup.SanitizeDescription(desc))
}
// CloneLink represents different types of clone URLs of repository.

View File

@ -78,7 +78,7 @@ type FindSecretsOptions struct {
Name string
}
func (opts *FindSecretsOptions) toConds() builder.Cond {
func (opts FindSecretsOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.OwnerID > 0 {
cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
@ -96,22 +96,6 @@ func (opts *FindSecretsOptions) toConds() builder.Cond {
return cond
}
func FindSecrets(ctx context.Context, opts FindSecretsOptions) ([]*Secret, error) {
var secrets []*Secret
sess := db.GetEngine(ctx)
if opts.PageSize != 0 {
sess = db.SetSessionPagination(sess, &opts.ListOptions)
}
return secrets, sess.
Where(opts.toConds()).
Find(&secrets)
}
// CountSecrets counts the secrets
func CountSecrets(ctx context.Context, opts *FindSecretsOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toConds()).Count(new(Secret))
}
// UpdateSecret changes org or user reop secret.
func UpdateSecret(ctx context.Context, secretID int64, data string) error {
encrypted, err := secret_module.EncryptSecret(setting.SecretKey, data)

View File

@ -81,7 +81,7 @@ func SetSettings(ctx context.Context, settings map[string]string) error {
return err
}
for k, v := range settings {
res, err := e.Exec("UPDATE system_setting SET setting_value=? WHERE setting_key=?", v, k)
res, err := e.Exec("UPDATE system_setting SET version=version+1, setting_value=? WHERE setting_key=?", v, k)
if err != nil {
return err
}
@ -115,24 +115,26 @@ func (d *dbConfigCachedGetter) GetValue(ctx context.Context, key string) (v stri
func (d *dbConfigCachedGetter) GetRevision(ctx context.Context) int {
d.mu.RLock()
defer d.mu.RUnlock()
if time.Since(d.cacheTime) < time.Second {
return d.revision
cachedDuration := time.Since(d.cacheTime)
cachedRevision := d.revision
d.mu.RUnlock()
if cachedDuration < time.Second {
return cachedRevision
}
d.mu.Lock()
defer d.mu.Unlock()
if GetRevision(ctx) != d.revision {
d.mu.RUnlock()
d.mu.Lock()
rev, set, err := GetAllSettings(ctx)
if err != nil {
log.Error("Unable to get all settings: %v", err)
} else {
d.cacheTime = time.Now()
d.revision = rev
d.settings = set
}
d.mu.Unlock()
d.mu.RLock()
}
d.cacheTime = time.Now()
return d.revision
}

View File

@ -39,4 +39,16 @@ func TestSettings(t *testing.T) {
assert.EqualValues(t, 3, rev)
assert.Len(t, settings, 2)
assert.EqualValues(t, "false", settings[keyName])
// setting the same value should not trigger DuplicateKey error, and the "version" should be increased
setting := &system.Setting{SettingKey: keyName}
_, err = db.GetByBean(db.DefaultContext, setting)
assert.NoError(t, err)
assert.EqualValues(t, 2, setting.Version)
err = system.SetSettings(db.DefaultContext, map[string]string{keyName: "false"})
assert.NoError(t, err)
setting = &system.Setting{SettingKey: keyName}
_, err = db.GetByBean(db.DefaultContext, setting)
assert.NoError(t, err)
assert.EqualValues(t, 3, setting.Version)
}

View File

@ -96,19 +96,6 @@ func GetExternalLogin(ctx context.Context, externalLoginUser *ExternalLoginUser)
return db.GetEngine(ctx).Get(externalLoginUser)
}
// ListAccountLinks returns a map with the ExternalLoginUser and its LoginSource
func ListAccountLinks(ctx context.Context, user *User) ([]*ExternalLoginUser, error) {
externalAccounts := make([]*ExternalLoginUser, 0, 5)
err := db.GetEngine(ctx).Where("user_id=?", user.ID).
Desc("login_source_id").
Find(&externalAccounts)
if err != nil {
return nil, err
}
return externalAccounts, nil
}
// LinkExternalToUser link the external user to the user
func LinkExternalToUser(ctx context.Context, user *User, externalLoginUser *ExternalLoginUser) error {
has, err := db.GetEngine(ctx).Where("external_id=? AND login_source_id=?", externalLoginUser.ExternalID, externalLoginUser.LoginSourceID).
@ -173,28 +160,23 @@ func UpdateExternalUserByExternalID(ctx context.Context, external *ExternalLogin
// FindExternalUserOptions represents options to find external users
type FindExternalUserOptions struct {
db.ListOptions
Provider string
Limit int
Start int
UserID int64
OrderBy string
}
func (opts FindExternalUserOptions) toConds() builder.Cond {
func (opts FindExternalUserOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if len(opts.Provider) > 0 {
cond = cond.And(builder.Eq{"provider": opts.Provider})
}
if opts.UserID > 0 {
cond = cond.And(builder.Eq{"user_id": opts.UserID})
}
return cond
}
// FindExternalUsersByProvider represents external users via provider
func FindExternalUsersByProvider(ctx context.Context, opts FindExternalUserOptions) ([]ExternalLoginUser, error) {
var users []ExternalLoginUser
err := db.GetEngine(ctx).Where(opts.toConds()).
Limit(opts.Limit, opts.Start).
OrderBy("login_source_id ASC, external_id ASC").
Find(&users)
if err != nil {
return nil, err
}
return users, nil
func (opts FindExternalUserOptions) ToOrders() string {
return opts.OrderBy
}

View File

@ -392,39 +392,40 @@ func CreateWebhooks(ctx context.Context, ws []*Webhook) error {
return db.Insert(ctx, ws)
}
// getWebhook uses argument bean as query condition,
// ID must be specified and do not assign unnecessary fields.
func getWebhook(ctx context.Context, bean *Webhook) (*Webhook, error) {
has, err := db.GetEngine(ctx).Get(bean)
// GetWebhookByID returns webhook of repository by given ID.
func GetWebhookByID(ctx context.Context, id int64) (*Webhook, error) {
bean := new(Webhook)
has, err := db.GetEngine(ctx).ID(id).Get(bean)
if err != nil {
return nil, err
} else if !has {
return nil, ErrWebhookNotExist{ID: bean.ID}
return nil, ErrWebhookNotExist{ID: id}
}
return bean, nil
}
// GetWebhookByID returns webhook of repository by given ID.
func GetWebhookByID(ctx context.Context, id int64) (*Webhook, error) {
return getWebhook(ctx, &Webhook{
ID: id,
})
}
// GetWebhookByRepoID returns webhook of repository by given ID.
func GetWebhookByRepoID(ctx context.Context, repoID, id int64) (*Webhook, error) {
return getWebhook(ctx, &Webhook{
ID: id,
RepoID: repoID,
})
webhook := new(Webhook)
has, err := db.GetEngine(ctx).Where("id=? AND repo_id=?", id, repoID).Get(webhook)
if err != nil {
return nil, err
} else if !has {
return nil, ErrWebhookNotExist{ID: id}
}
return webhook, nil
}
// GetWebhookByOwnerID returns webhook of a user or organization by given ID.
func GetWebhookByOwnerID(ctx context.Context, ownerID, id int64) (*Webhook, error) {
return getWebhook(ctx, &Webhook{
ID: id,
OwnerID: ownerID,
})
webhook := new(Webhook)
has, err := db.GetEngine(ctx).Where("id=? AND owner_id=?", id, ownerID).Get(webhook)
if err != nil {
return nil, err
} else if !has {
return nil, ErrWebhookNotExist{ID: id}
}
return webhook, nil
}
// ListWebhookOptions are options to filter webhooks on ListWebhooksByOpts
@ -435,7 +436,7 @@ type ListWebhookOptions struct {
IsActive util.OptionalBool
}
func (opts *ListWebhookOptions) toCond() builder.Cond {
func (opts ListWebhookOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID != 0 {
cond = cond.And(builder.Eq{"webhook.repo_id": opts.RepoID})
@ -449,27 +450,6 @@ func (opts *ListWebhookOptions) toCond() builder.Cond {
return cond
}
// ListWebhooksByOpts return webhooks based on options
func ListWebhooksByOpts(ctx context.Context, opts *ListWebhookOptions) ([]*Webhook, error) {
sess := db.GetEngine(ctx).Where(opts.toCond())
if opts.Page != 0 {
sess = db.SetSessionPagination(sess, opts)
webhooks := make([]*Webhook, 0, opts.PageSize)
err := sess.Find(&webhooks)
return webhooks, err
}
webhooks := make([]*Webhook, 0, 10)
err := sess.Find(&webhooks)
return webhooks, err
}
// CountWebhooksByOpts count webhooks based on options and ignore pagination
func CountWebhooksByOpts(ctx context.Context, opts *ListWebhookOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toCond()).Count(&Webhook{})
}
// UpdateWebhook updates information of webhook.
func UpdateWebhook(ctx context.Context, w *Webhook) error {
_, err := db.GetEngine(ctx).ID(w.ID).AllCols().Update(w)
@ -482,20 +462,20 @@ func UpdateWebhookLastStatus(ctx context.Context, w *Webhook) error {
return err
}
// deleteWebhook uses argument bean as query condition,
// DeleteWebhookByID uses argument bean as query condition,
// ID must be specified and do not assign unnecessary fields.
func deleteWebhook(ctx context.Context, bean *Webhook) (err error) {
func DeleteWebhookByID(ctx context.Context, id int64) (err error) {
ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
}
defer committer.Close()
if count, err := db.DeleteByBean(ctx, bean); err != nil {
if count, err := db.DeleteByID(ctx, id, new(Webhook)); err != nil {
return err
} else if count == 0 {
return ErrWebhookNotExist{ID: bean.ID}
} else if _, err = db.DeleteByBean(ctx, &HookTask{HookID: bean.ID}); err != nil {
return ErrWebhookNotExist{ID: id}
} else if _, err = db.DeleteByBean(ctx, &HookTask{HookID: id}); err != nil {
return err
}
@ -504,16 +484,16 @@ func deleteWebhook(ctx context.Context, bean *Webhook) (err error) {
// DeleteWebhookByRepoID deletes webhook of repository by given ID.
func DeleteWebhookByRepoID(ctx context.Context, repoID, id int64) error {
return deleteWebhook(ctx, &Webhook{
ID: id,
RepoID: repoID,
})
if _, err := GetWebhookByRepoID(ctx, repoID, id); err != nil {
return err
}
return DeleteWebhookByID(ctx, id)
}
// DeleteWebhookByOwnerID deletes webhook of a user or organization by given ID.
func DeleteWebhookByOwnerID(ctx context.Context, ownerID, id int64) error {
return deleteWebhook(ctx, &Webhook{
ID: id,
OwnerID: ownerID,
})
if _, err := GetWebhookByOwnerID(ctx, ownerID, id); err != nil {
return err
}
return DeleteWebhookByID(ctx, id)
}

View File

@ -123,7 +123,7 @@ func TestGetWebhookByOwnerID(t *testing.T) {
func TestGetActiveWebhooksByRepoID(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
hooks, err := ListWebhooksByOpts(db.DefaultContext, &ListWebhookOptions{RepoID: 1, IsActive: util.OptionalBoolTrue})
hooks, err := db.Find[Webhook](db.DefaultContext, ListWebhookOptions{RepoID: 1, IsActive: util.OptionalBoolTrue})
assert.NoError(t, err)
if assert.Len(t, hooks, 1) {
assert.Equal(t, int64(1), hooks[0].ID)
@ -133,7 +133,7 @@ func TestGetActiveWebhooksByRepoID(t *testing.T) {
func TestGetWebhooksByRepoID(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
hooks, err := ListWebhooksByOpts(db.DefaultContext, &ListWebhookOptions{RepoID: 1})
hooks, err := db.Find[Webhook](db.DefaultContext, ListWebhookOptions{RepoID: 1})
assert.NoError(t, err)
if assert.Len(t, hooks, 2) {
assert.Equal(t, int64(1), hooks[0].ID)
@ -143,7 +143,7 @@ func TestGetWebhooksByRepoID(t *testing.T) {
func TestGetActiveWebhooksByOwnerID(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
hooks, err := ListWebhooksByOpts(db.DefaultContext, &ListWebhookOptions{OwnerID: 3, IsActive: util.OptionalBoolTrue})
hooks, err := db.Find[Webhook](db.DefaultContext, ListWebhookOptions{OwnerID: 3, IsActive: util.OptionalBoolTrue})
assert.NoError(t, err)
if assert.Len(t, hooks, 1) {
assert.Equal(t, int64(3), hooks[0].ID)
@ -153,7 +153,7 @@ func TestGetActiveWebhooksByOwnerID(t *testing.T) {
func TestGetWebhooksByOwnerID(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
hooks, err := ListWebhooksByOpts(db.DefaultContext, &ListWebhookOptions{OwnerID: 3})
hooks, err := db.Find[Webhook](db.DefaultContext, ListWebhookOptions{OwnerID: 3})
assert.NoError(t, err)
if assert.Len(t, hooks, 1) {
assert.Equal(t, int64(3), hooks[0].ID)

View File

@ -563,6 +563,7 @@ func RepoAssignment(ctx *Context) context.CancelFunc {
ctx.Data["CanWriteCode"] = ctx.Repo.CanWrite(unit_model.TypeCode)
ctx.Data["CanWriteIssues"] = ctx.Repo.CanWrite(unit_model.TypeIssues)
ctx.Data["CanWritePulls"] = ctx.Repo.CanWrite(unit_model.TypePullRequests)
ctx.Data["CanWriteActions"] = ctx.Repo.CanWrite(unit_model.TypeActions)
canSignedUserFork, err := repo_module.CanUserForkRepo(ctx, ctx.Doer, ctx.Repo.Repository)
if err != nil {

View File

@ -26,7 +26,7 @@ func handleDeleteOrphanedRepos(ctx context.Context, logger log.Logger, autofix b
// countOrphanedRepos count repository where user of owner_id do not exist
func countOrphanedRepos(ctx context.Context) (int64, error) {
return db.CountOrphanedObjects(ctx, "repository", "user", "repository.owner_id=user.id")
return db.CountOrphanedObjects(ctx, "repository", "user", "repository.owner_id=`user`.id")
}
// deleteOrphanedRepos delete repository where user of owner_id do not exist
@ -43,7 +43,7 @@ func deleteOrphanedRepos(ctx context.Context) (int64, error) {
default:
var ids []int64
if err := e.Table("`repository`").
Join("LEFT", "`user`", "repository.owner_id=user.id").
Join("LEFT", "`user`", "repository.owner_id=`user`.id").
Where(builder.IsNull{"`user`.id"}).
Select("`repository`.id").Limit(batchSize).Find(&ids); err != nil {
return deleted, err

View File

@ -11,6 +11,7 @@ import (
"io"
"os"
"regexp"
"strings"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/util"
@ -18,8 +19,10 @@ import (
// BlamePart represents block of blame - continuous lines with one sha
type BlamePart struct {
Sha string
Lines []string
Sha string
Lines []string
PreviousSha string
PreviousPath string
}
// BlameReader returns part of file blame one by one
@ -43,30 +46,38 @@ func (r *BlameReader) NextPart() (*BlamePart, error) {
var blamePart *BlamePart
if r.lastSha != nil {
blamePart = &BlamePart{*r.lastSha, make([]string, 0)}
blamePart = &BlamePart{
Sha: *r.lastSha,
Lines: make([]string, 0),
}
}
var line []byte
var lineBytes []byte
var isPrefix bool
var err error
for err != io.EOF {
line, isPrefix, err = r.bufferedReader.ReadLine()
lineBytes, isPrefix, err = r.bufferedReader.ReadLine()
if err != nil && err != io.EOF {
return blamePart, err
}
if len(line) == 0 {
if len(lineBytes) == 0 {
// isPrefix will be false
continue
}
lines := shaLineRegex.FindSubmatch(line)
line := string(lineBytes)
lines := shaLineRegex.FindStringSubmatch(line)
if lines != nil {
sha1 := string(lines[1])
sha1 := lines[1]
if blamePart == nil {
blamePart = &BlamePart{sha1, make([]string, 0)}
blamePart = &BlamePart{
Sha: sha1,
Lines: make([]string, 0),
}
}
if blamePart.Sha != sha1 {
@ -81,9 +92,11 @@ func (r *BlameReader) NextPart() (*BlamePart, error) {
return blamePart, nil
}
} else if line[0] == '\t' {
code := line[1:]
blamePart.Lines = append(blamePart.Lines, string(code))
blamePart.Lines = append(blamePart.Lines, line[1:])
} else if strings.HasPrefix(line, "previous ") {
parts := strings.SplitN(line[len("previous "):], " ", 2)
blamePart.PreviousSha = parts[0]
blamePart.PreviousPath = parts[1]
}
// need to munch to end of line...

View File

@ -24,15 +24,17 @@ func TestReadingBlameOutput(t *testing.T) {
parts := []*BlamePart{
{
"72866af952e98d02a73003501836074b286a78f6",
[]string{
Sha: "72866af952e98d02a73003501836074b286a78f6",
Lines: []string{
"# test_repo",
"Test repository for testing migration from github to gitea",
},
},
{
"f32b0a9dfd09a60f616f29158f772cedd89942d2",
[]string{"", "Do not make any changes to this repo it is used for unit testing"},
Sha: "f32b0a9dfd09a60f616f29158f772cedd89942d2",
Lines: []string{"", "Do not make any changes to this repo it is used for unit testing"},
PreviousSha: "72866af952e98d02a73003501836074b286a78f6",
PreviousPath: "README.md",
},
}
@ -64,16 +66,18 @@ func TestReadingBlameOutput(t *testing.T) {
full := []*BlamePart{
{
"af7486bd54cfc39eea97207ca666aa69c9d6df93",
[]string{"line", "line"},
Sha: "af7486bd54cfc39eea97207ca666aa69c9d6df93",
Lines: []string{"line", "line"},
},
{
"45fb6cbc12f970b04eacd5cd4165edd11c8d7376",
[]string{"changed line"},
Sha: "45fb6cbc12f970b04eacd5cd4165edd11c8d7376",
Lines: []string{"changed line"},
PreviousSha: "af7486bd54cfc39eea97207ca666aa69c9d6df93",
PreviousPath: "blame.txt",
},
{
"af7486bd54cfc39eea97207ca666aa69c9d6df93",
[]string{"line", "line", ""},
Sha: "af7486bd54cfc39eea97207ca666aa69c9d6df93",
Lines: []string{"line", "line", ""},
},
}
@ -89,8 +93,8 @@ func TestReadingBlameOutput(t *testing.T) {
Bypass: false,
Parts: []*BlamePart{
{
"af7486bd54cfc39eea97207ca666aa69c9d6df93",
[]string{"line", "line", "changed line", "line", "line", ""},
Sha: "af7486bd54cfc39eea97207ca666aa69c9d6df93",
Lines: []string{"line", "line", "changed line", "line", "line", ""},
},
},
},

View File

@ -43,8 +43,9 @@ func (c *Commit) Message() string {
}
// Summary returns first line of commit message.
// The string is forced to be valid UTF8
func (c *Commit) Summary() string {
return strings.Split(strings.TrimSpace(c.CommitMessage), "\n")[0]
return strings.ToValidUTF8(strings.Split(strings.TrimSpace(c.CommitMessage), "\n")[0], "?")
}
// ParentID returns oid of n-th parent (0-based index).

View File

@ -0,0 +1,104 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package graceful
import (
"context"
"runtime/pprof"
"sync"
"time"
)
type systemdNotifyMsg string
const (
readyMsg systemdNotifyMsg = "READY=1"
stoppingMsg systemdNotifyMsg = "STOPPING=1"
reloadingMsg systemdNotifyMsg = "RELOADING=1"
watchdogMsg systemdNotifyMsg = "WATCHDOG=1"
)
func statusMsg(msg string) systemdNotifyMsg {
return systemdNotifyMsg("STATUS=" + msg)
}
// Manager manages the graceful shutdown process
type Manager struct {
ctx context.Context
isChild bool
forked bool
lock sync.RWMutex
state state
shutdownCtx context.Context
hammerCtx context.Context
terminateCtx context.Context
managerCtx context.Context
shutdownCtxCancel context.CancelFunc
hammerCtxCancel context.CancelFunc
terminateCtxCancel context.CancelFunc
managerCtxCancel context.CancelFunc
runningServerWaitGroup sync.WaitGroup
createServerWaitGroup sync.WaitGroup
terminateWaitGroup sync.WaitGroup
shutdownRequested chan struct{}
toRunAtShutdown []func()
toRunAtTerminate []func()
}
func newGracefulManager(ctx context.Context) *Manager {
manager := &Manager{ctx: ctx, shutdownRequested: make(chan struct{})}
manager.createServerWaitGroup.Add(numberOfServersToCreate)
manager.prepare(ctx)
manager.start()
return manager
}
func (g *Manager) prepare(ctx context.Context) {
g.terminateCtx, g.terminateCtxCancel = context.WithCancel(ctx)
g.shutdownCtx, g.shutdownCtxCancel = context.WithCancel(ctx)
g.hammerCtx, g.hammerCtxCancel = context.WithCancel(ctx)
g.managerCtx, g.managerCtxCancel = context.WithCancel(ctx)
g.terminateCtx = pprof.WithLabels(g.terminateCtx, pprof.Labels("graceful-lifecycle", "with-terminate"))
g.shutdownCtx = pprof.WithLabels(g.shutdownCtx, pprof.Labels("graceful-lifecycle", "with-shutdown"))
g.hammerCtx = pprof.WithLabels(g.hammerCtx, pprof.Labels("graceful-lifecycle", "with-hammer"))
g.managerCtx = pprof.WithLabels(g.managerCtx, pprof.Labels("graceful-lifecycle", "with-manager"))
if !g.setStateTransition(stateInit, stateRunning) {
panic("invalid graceful manager state: transition from init to running failed")
}
}
// DoImmediateHammer causes an immediate hammer
func (g *Manager) DoImmediateHammer() {
g.notify(statusMsg("Sending immediate hammer"))
g.doHammerTime(0 * time.Second)
}
// DoGracefulShutdown causes a graceful shutdown
func (g *Manager) DoGracefulShutdown() {
g.lock.Lock()
select {
case <-g.shutdownRequested:
default:
close(g.shutdownRequested)
}
forked := g.forked
g.lock.Unlock()
if !forked {
g.notify(stoppingMsg)
} else {
g.notify(statusMsg("Shutting down after fork"))
}
g.doShutdown()
}
// RegisterServer registers the running of a listening server, in the case of unix this means that the parent process can now die.
// Any call to RegisterServer must be matched by a call to ServerDone
func (g *Manager) RegisterServer() {
KillParent()
g.runningServerWaitGroup.Add(1)
}

View File

@ -12,7 +12,6 @@ import (
"os/signal"
"runtime/pprof"
"strconv"
"sync"
"syscall"
"time"
@ -22,51 +21,6 @@ import (
"code.gitea.io/gitea/modules/setting"
)
// Manager manages the graceful shutdown process
type Manager struct {
isChild bool
forked bool
lock *sync.RWMutex
state state
shutdownCtx context.Context
hammerCtx context.Context
terminateCtx context.Context
managerCtx context.Context
shutdownCtxCancel context.CancelFunc
hammerCtxCancel context.CancelFunc
terminateCtxCancel context.CancelFunc
managerCtxCancel context.CancelFunc
runningServerWaitGroup sync.WaitGroup
createServerWaitGroup sync.WaitGroup
terminateWaitGroup sync.WaitGroup
toRunAtShutdown []func()
toRunAtTerminate []func()
}
func newGracefulManager(ctx context.Context) *Manager {
manager := &Manager{
isChild: len(os.Getenv(listenFDsEnv)) > 0 && os.Getppid() > 1,
lock: &sync.RWMutex{},
}
manager.createServerWaitGroup.Add(numberOfServersToCreate)
manager.start(ctx)
return manager
}
type systemdNotifyMsg string
const (
readyMsg systemdNotifyMsg = "READY=1"
stoppingMsg systemdNotifyMsg = "STOPPING=1"
reloadingMsg systemdNotifyMsg = "RELOADING=1"
watchdogMsg systemdNotifyMsg = "WATCHDOG=1"
)
func statusMsg(msg string) systemdNotifyMsg {
return systemdNotifyMsg("STATUS=" + msg)
}
func pidMsg() systemdNotifyMsg {
return systemdNotifyMsg("MAINPID=" + strconv.Itoa(os.Getpid()))
}
@ -89,27 +43,13 @@ func (g *Manager) notify(msg systemdNotifyMsg) {
}
}
func (g *Manager) start(ctx context.Context) {
// Make contexts
g.terminateCtx, g.terminateCtxCancel = context.WithCancel(ctx)
g.shutdownCtx, g.shutdownCtxCancel = context.WithCancel(ctx)
g.hammerCtx, g.hammerCtxCancel = context.WithCancel(ctx)
g.managerCtx, g.managerCtxCancel = context.WithCancel(ctx)
// Next add pprof labels to these contexts
g.terminateCtx = pprof.WithLabels(g.terminateCtx, pprof.Labels("graceful-lifecycle", "with-terminate"))
g.shutdownCtx = pprof.WithLabels(g.shutdownCtx, pprof.Labels("graceful-lifecycle", "with-shutdown"))
g.hammerCtx = pprof.WithLabels(g.hammerCtx, pprof.Labels("graceful-lifecycle", "with-hammer"))
g.managerCtx = pprof.WithLabels(g.managerCtx, pprof.Labels("graceful-lifecycle", "with-manager"))
func (g *Manager) start() {
// Now label this and all goroutines created by this goroutine with the graceful-lifecycle manager
pprof.SetGoroutineLabels(g.managerCtx)
defer pprof.SetGoroutineLabels(ctx)
defer pprof.SetGoroutineLabels(g.ctx)
g.isChild = len(os.Getenv(listenFDsEnv)) > 0 && os.Getppid() > 1
// Set the running state & handle signals
if !g.setStateTransition(stateInit, stateRunning) {
panic("invalid graceful manager state: transition from init to running failed")
}
g.notify(statusMsg("Starting Gitea"))
g.notify(pidMsg())
go g.handleSignals(g.managerCtx)
@ -118,11 +58,9 @@ func (g *Manager) start(ctx context.Context) {
startupDone := make(chan struct{})
go func() {
defer close(startupDone)
// Wait till we're done getting all of the listeners and then close
// the unused ones
// Wait till we're done getting all the listeners and then close the unused ones
g.createServerWaitGroup.Wait()
// Ignore the error here there's not much we can do with it
// They're logged in the CloseProvidedListeners function
// Ignore the error here there's not much we can do with it, they're logged in the CloseProvidedListeners function
_ = CloseProvidedListeners()
g.notify(readyMsg)
}()
@ -133,7 +71,7 @@ func (g *Manager) start(ctx context.Context) {
return
case <-g.IsShutdown():
func() {
// When waitgroup counter goes negative it will panic - we don't care about this so we can just ignore it.
// When WaitGroup counter goes negative it will panic - we don't care about this so we can just ignore it.
defer func() {
_ = recover()
}()
@ -255,29 +193,3 @@ func (g *Manager) DoGracefulRestart() {
g.doShutdown()
}
}
// DoImmediateHammer causes an immediate hammer
func (g *Manager) DoImmediateHammer() {
g.notify(statusMsg("Sending immediate hammer"))
g.doHammerTime(0 * time.Second)
}
// DoGracefulShutdown causes a graceful shutdown
func (g *Manager) DoGracefulShutdown() {
g.lock.Lock()
if !g.forked {
g.lock.Unlock()
g.notify(stoppingMsg)
} else {
g.lock.Unlock()
g.notify(statusMsg("Shutting down after fork"))
}
g.doShutdown()
}
// RegisterServer registers the running of a listening server, in the case of unix this means that the parent process can now die.
// Any call to RegisterServer must be matched by a call to ServerDone
func (g *Manager) RegisterServer() {
KillParent()
g.runningServerWaitGroup.Add(1)
}

View File

@ -7,11 +7,9 @@
package graceful
import (
"context"
"os"
"runtime/pprof"
"strconv"
"sync"
"time"
"code.gitea.io/gitea/modules/log"
@ -30,64 +28,11 @@ const (
acceptHammerCode = svc.Accepted(hammerCode)
)
// Manager manages the graceful shutdown process
type Manager struct {
ctx context.Context
isChild bool
lock *sync.RWMutex
state state
shutdownCtx context.Context
hammerCtx context.Context
terminateCtx context.Context
managerCtx context.Context
shutdownCtxCancel context.CancelFunc
hammerCtxCancel context.CancelFunc
terminateCtxCancel context.CancelFunc
managerCtxCancel context.CancelFunc
runningServerWaitGroup sync.WaitGroup
createServerWaitGroup sync.WaitGroup
terminateWaitGroup sync.WaitGroup
shutdownRequested chan struct{}
toRunAtShutdown []func()
toRunAtTerminate []func()
}
func newGracefulManager(ctx context.Context) *Manager {
manager := &Manager{
isChild: false,
lock: &sync.RWMutex{},
ctx: ctx,
}
manager.createServerWaitGroup.Add(numberOfServersToCreate)
manager.start()
return manager
}
func (g *Manager) start() {
// Make contexts
g.terminateCtx, g.terminateCtxCancel = context.WithCancel(g.ctx)
g.shutdownCtx, g.shutdownCtxCancel = context.WithCancel(g.ctx)
g.hammerCtx, g.hammerCtxCancel = context.WithCancel(g.ctx)
g.managerCtx, g.managerCtxCancel = context.WithCancel(g.ctx)
// Next add pprof labels to these contexts
g.terminateCtx = pprof.WithLabels(g.terminateCtx, pprof.Labels("graceful-lifecycle", "with-terminate"))
g.shutdownCtx = pprof.WithLabels(g.shutdownCtx, pprof.Labels("graceful-lifecycle", "with-shutdown"))
g.hammerCtx = pprof.WithLabels(g.hammerCtx, pprof.Labels("graceful-lifecycle", "with-hammer"))
g.managerCtx = pprof.WithLabels(g.managerCtx, pprof.Labels("graceful-lifecycle", "with-manager"))
// Now label this and all goroutines created by this goroutine with the graceful-lifecycle manager
pprof.SetGoroutineLabels(g.managerCtx)
defer pprof.SetGoroutineLabels(g.ctx)
// Make channels
g.shutdownRequested = make(chan struct{})
// Set the running state
if !g.setStateTransition(stateInit, stateRunning) {
panic("invalid graceful manager state: transition from init to running failed")
}
if skip, _ := strconv.ParseBool(os.Getenv("SKIP_MINWINSVC")); skip {
log.Trace("Skipping SVC check as SKIP_MINWINSVC is set")
return
@ -201,30 +146,6 @@ hammerLoop:
return false, 0
}
// DoImmediateHammer causes an immediate hammer
func (g *Manager) DoImmediateHammer() {
g.doHammerTime(0 * time.Second)
}
// DoGracefulShutdown causes a graceful shutdown
func (g *Manager) DoGracefulShutdown() {
g.lock.Lock()
select {
case <-g.shutdownRequested:
g.lock.Unlock()
default:
close(g.shutdownRequested)
g.lock.Unlock()
g.doShutdown()
}
}
// RegisterServer registers the running of a listening server.
// Any call to RegisterServer must be matched by a call to ServerDone
func (g *Manager) RegisterServer() {
g.runningServerWaitGroup.Add(1)
}
func (g *Manager) awaitServer(limit time.Duration) bool {
c := make(chan struct{})
go func() {
@ -249,3 +170,11 @@ func (g *Manager) awaitServer(limit time.Duration) bool {
}
}
}
func (g *Manager) notify(msg systemdNotifyMsg) {
// Windows doesn't use systemd to notify
}
func KillParent() {
// Windows doesn't need to "kill parent" because there is no graceful restart
}

View File

@ -211,10 +211,11 @@ func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (
skip, limit := indexer_internal.ParsePaginator(options.Paginator, maxTotalHits)
searchRes, err := b.inner.Client.Index(b.inner.VersionedIndexName()).Search(options.Keyword, &meilisearch.SearchRequest{
Filter: query.Statement(),
Limit: int64(limit),
Offset: int64(skip),
Sort: sortBy,
Filter: query.Statement(),
Limit: int64(limit),
Offset: int64(skip),
Sort: sortBy,
MatchingStrategy: "all",
})
if err != nil {
return nil, err

View File

@ -18,9 +18,10 @@ import (
// Sanitizer is a protection wrapper of *bluemonday.Policy which does not allow
// any modification to the underlying policies once it's been created.
type Sanitizer struct {
defaultPolicy *bluemonday.Policy
rendererPolicies map[string]*bluemonday.Policy
init sync.Once
defaultPolicy *bluemonday.Policy
descriptionPolicy *bluemonday.Policy
rendererPolicies map[string]*bluemonday.Policy
init sync.Once
}
var (
@ -41,6 +42,7 @@ func NewSanitizer() {
func InitializeSanitizer() {
sanitizer.rendererPolicies = map[string]*bluemonday.Policy{}
sanitizer.defaultPolicy = createDefaultPolicy()
sanitizer.descriptionPolicy = createRepoDescriptionPolicy()
for name, renderer := range renderers {
sanitizerRules := renderer.SanitizerRules()
@ -161,6 +163,27 @@ func createDefaultPolicy() *bluemonday.Policy {
return policy
}
// createRepoDescriptionPolicy returns a minimal, stricter policy that is used for
// repository descriptions.
func createRepoDescriptionPolicy() *bluemonday.Policy {
policy := bluemonday.NewPolicy()
// Allow italics and bold.
policy.AllowElements("i", "b", "em", "strong")
// Allow code.
policy.AllowElements("code")
// Allow links
policy.AllowAttrs("href", "target", "rel").OnElements("a")
// Allow classes for emojis
policy.AllowAttrs("class").Matching(regexp.MustCompile(`^emoji$`)).OnElements("img", "span")
policy.AllowAttrs("aria-label").OnElements("span")
return policy
}
func addSanitizerRules(policy *bluemonday.Policy, rules []setting.MarkupSanitizerRule) {
for _, rule := range rules {
if rule.AllowDataURIImages {
@ -176,6 +199,12 @@ func addSanitizerRules(policy *bluemonday.Policy, rules []setting.MarkupSanitize
}
}
// SanitizeDescription sanitizes the HTML generated for a repository description.
func SanitizeDescription(s string) string {
NewSanitizer()
return sanitizer.descriptionPolicy.Sanitize(s)
}
// Sanitize takes a string that contains a HTML fragment or document and applies policy whitelist.
func Sanitize(s string) string {
NewSanitizer()

View File

@ -73,6 +73,28 @@ func Test_Sanitizer(t *testing.T) {
}
}
func TestDescriptionSanitizer(t *testing.T) {
NewSanitizer()
testCases := []string{
`<h1>Title</h1>`, `Title`,
`<img src='img.png' alt='image'>`, ``,
`<span class="emoji" aria-label="thumbs up">THUMBS UP</span>`, `<span class="emoji" aria-label="thumbs up">THUMBS UP</span>`,
`<span style="color: red">Hello World</span>`, `<span>Hello World</span>`,
`<br>`, ``,
`<a href="https://example.com" target="_blank" rel="noopener noreferrer">https://example.com</a>`, `<a href="https://example.com" target="_blank" rel="noopener noreferrer">https://example.com</a>`,
`<mark>Important!</mark>`, `Important!`,
`<details>Click me! <summary>Nothing to see here.</summary></details>`, `Click me! Nothing to see here.`,
`<input type="hidden">`, ``,
`<b>I</b> have a <i>strong</i> <strong>opinion</strong> about <em>this</em>.`, `<b>I</b> have a <i>strong</i> <strong>opinion</strong> about <em>this</em>.`,
`Provides alternative <code>wg(8)</code> tool`, `Provides alternative <code>wg(8)</code> tool`,
}
for i := 0; i < len(testCases); i += 2 {
assert.Equal(t, testCases[i+1], SanitizeDescription(testCases[i]))
}
}
func TestSanitizeNonEscape(t *testing.T) {
descStr := "<scrİpt>&lt;script&gt;alert(document.domain)&lt;/script&gt;</scrİpt>"

View File

@ -160,24 +160,25 @@ const notRegularFileMode = os.ModeSymlink | os.ModeNamedPipe | os.ModeSocket | o
// getDirectorySize returns the disk consumption for a given path
func getDirectorySize(path string) (int64, error) {
var size int64
err := filepath.WalkDir(path, func(_ string, info os.DirEntry, err error) error {
if err != nil {
if os.IsNotExist(err) { // ignore the error because the file maybe deleted during traversing.
return nil
}
err := filepath.WalkDir(path, func(_ string, entry os.DirEntry, err error) error {
if os.IsNotExist(err) { // ignore the error because some files (like temp/lock file) may be deleted during traversing.
return nil
} else if err != nil {
return err
}
if info.IsDir() {
if entry.IsDir() {
return nil
}
f, err := info.Info()
if err != nil {
info, err := entry.Info()
if os.IsNotExist(err) { // ignore the error as above
return nil
} else if err != nil {
return err
}
if (f.Mode() & notRegularFileMode) == 0 {
size += f.Size()
if (info.Mode() & notRegularFileMode) == 0 {
size += info.Size()
}
return err
return nil
})
return size, err
}

View File

@ -22,9 +22,11 @@ var (
ZombieTaskTimeout time.Duration `ini:"ZOMBIE_TASK_TIMEOUT"`
EndlessTaskTimeout time.Duration `ini:"ENDLESS_TASK_TIMEOUT"`
AbandonedJobTimeout time.Duration `ini:"ABANDONED_JOB_TIMEOUT"`
SkipWorkflowStrings []string `ini:"SKIP_WORKFLOW_STRINGS"`
}{
Enabled: true,
DefaultActionsURL: defaultActionsURLGitHub,
Enabled: true,
DefaultActionsURL: defaultActionsURLGitHub,
SkipWorkflowStrings: []string{"[skip ci]", "[ci skip]", "[no ci]", "[skip actions]", "[actions skip]"},
}
)

View File

@ -13,7 +13,7 @@ var Attachment = struct {
}{
Storage: &Storage{},
AllowedTypes: ".csv,.docx,.fodg,.fodp,.fods,.fodt,.gif,.gz,.jpeg,.jpg,.log,.md,.mov,.mp4,.odf,.odg,.odp,.ods,.odt,.patch,.pdf,.png,.pptx,.svg,.tgz,.txt,.webm,.xls,.xlsx,.zip",
MaxSize: 4,
MaxSize: 2048,
MaxFiles: 5,
Enabled: true,
}
@ -26,7 +26,7 @@ func loadAttachmentFrom(rootCfg ConfigProvider) (err error) {
}
Attachment.AllowedTypes = sec.Key("ALLOWED_TYPES").MustString(".csv,.docx,.fodg,.fodp,.fods,.fodt,.gif,.gz,.jpeg,.jpg,.log,.md,.mov,.mp4,.odf,.odg,.odp,.ods,.odt,.patch,.pdf,.png,.pptx,.svg,.tgz,.txt,.webm,.xls,.xlsx,.zip")
Attachment.MaxSize = sec.Key("MAX_SIZE").MustInt64(4)
Attachment.MaxSize = sec.Key("MAX_SIZE").MustInt64(2048)
Attachment.MaxFiles = sec.Key("MAX_FILES").MustInt(5)
Attachment.Enabled = sec.Key("ENABLED").MustBool(true)

View File

@ -9,7 +9,6 @@ import (
"net"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
@ -109,7 +108,7 @@ func DBConnStr() (string, error) {
connStr = fmt.Sprintf("%s:%s@%s(%s)/%s%scharset=%s&parseTime=true&tls=%s",
Database.User, Database.Passwd, connType, Database.Host, Database.Name, paramSep, Database.MysqlCharset, tls)
case "postgres":
connStr = getPostgreSQLConnectionString(Database.Host, Database.User, Database.Passwd, Database.Name, paramSep, Database.SSLMode)
connStr = getPostgreSQLConnectionString(Database.Host, Database.User, Database.Passwd, Database.Name, Database.SSLMode)
case "mssql":
host, port := ParseMSSQLHostPort(Database.Host)
connStr = fmt.Sprintf("server=%s; port=%s; database=%s; user id=%s; password=%s;", host, port, Database.Name, Database.User, Database.Passwd)
@ -117,7 +116,7 @@ func DBConnStr() (string, error) {
if !EnableSQLite3 {
return "", errors.New("this Gitea binary was not built with SQLite3 support")
}
if err := os.MkdirAll(path.Dir(Database.Path), os.ModePerm); err != nil {
if err := os.MkdirAll(filepath.Dir(Database.Path), os.ModePerm); err != nil {
return "", fmt.Errorf("Failed to create directories: %w", err)
}
journalMode := ""
@ -157,7 +156,8 @@ func parsePostgreSQLHostPort(info string) (host, port string) {
return host, port
}
func getPostgreSQLConnectionString(dbHost, dbUser, dbPasswd, dbName, dbParam, dbsslMode string) (connStr string) {
func getPostgreSQLConnectionString(dbHost, dbUser, dbPasswd, dbName, dbsslMode string) (connStr string) {
dbName, dbParam, _ := strings.Cut(dbName, "?")
host, port := parsePostgreSQLHostPort(dbHost)
connURL := url.URL{
Scheme: "postgres",

View File

@ -59,38 +59,39 @@ func Test_parsePostgreSQLHostPort(t *testing.T) {
func Test_getPostgreSQLConnectionString(t *testing.T) {
tests := []struct {
Host string
Port string
User string
Passwd string
Name string
Param string
SSLMode string
Output string
}{
{
Host: "/tmp/pg.sock",
Port: "4321",
User: "testuser",
Passwd: "space space !#$%^^%^```-=?=",
Name: "gitea",
Param: "",
SSLMode: "false",
Output: "postgres://testuser:space%20space%20%21%23$%25%5E%5E%25%5E%60%60%60-=%3F=@:5432/gitea?host=%2Ftmp%2Fpg.sock&sslmode=false",
},
{
Host: "localhost",
Port: "1234",
User: "pgsqlusername",
Passwd: "I love Gitea!",
Name: "gitea",
Param: "",
SSLMode: "true",
Output: "postgres://pgsqlusername:I%20love%20Gitea%21@localhost:5432/gitea?sslmode=true",
},
{
Host: "localhost:1234",
User: "user",
Passwd: "pass",
Name: "gitea?param=1",
Output: "postgres://user:pass@localhost:1234/gitea?param=1&sslmode=",
},
}
for _, test := range tests {
connStr := getPostgreSQLConnectionString(test.Host, test.User, test.Passwd, test.Name, test.Param, test.SSLMode)
connStr := getPostgreSQLConnectionString(test.Host, test.User, test.Passwd, test.Name, test.SSLMode)
assert.Equal(t, test.Output, connStr)
}
}

View File

@ -184,7 +184,7 @@ var (
Enabled: true,
TempPath: "data/tmp/uploads",
AllowedTypes: "",
FileMaxSize: 3,
FileMaxSize: 50,
MaxFiles: 5,
},

View File

@ -230,6 +230,7 @@ func RenderMarkdownToHtml(ctx context.Context, input string) template.HTML { //n
output, err := markdown.RenderString(&markup.RenderContext{
Ctx: ctx,
URLPrefix: setting.AppSubURL,
Metas: map[string]string{"mode": "document"},
}, input)
if err != nil {
log.Error("RenderString: %v", err)

View File

@ -136,6 +136,10 @@ func (r *Route) Get(pattern string, h ...any) {
r.Methods("GET", pattern, h...)
}
func (r *Route) Options(pattern string, h ...any) {
r.Methods("OPTIONS", pattern, h...)
}
// GetOptions delegate get and options method
func (r *Route) GetOptions(pattern string, h ...any) {
r.Methods("GET,OPTIONS", pattern, h...)

View File

@ -0,0 +1,10 @@
SAX2 is Free!
I hereby abandon any property rights to SAX 2.0 (the Simple API for
XML), and release all of the SAX 2.0 source code, compiled code, and
documentation contained in this distribution into the Public Domain.
SAX comes with NO WARRANTY or guarantee of fitness for any
purpose.
David Megginson, david@megginson.com
2000-05-05

options/license/radvd Normal file (37 lines)
View File

@ -0,0 +1,37 @@
The author(s) grant permission for redistribution and use in source and
binary forms, with or without modification, of the software and documentation
provided that the following conditions are met:
0. If you receive a version of the software that is specifically labelled
as not being for redistribution (check the version message and/or README),
you are not permitted to redistribute that version of the software in any
way or form.
1. All terms of all other applicable copyrights and licenses must be
followed.
2. Redistributions of source code must retain the authors' copyright
notice(s), this list of conditions, and the following disclaimer.
3. Redistributions in binary form must reproduce the authors' copyright
notice(s), this list of conditions, and the following disclaimer in the
documentation and/or other materials provided with the distribution.
4. All advertising materials mentioning features or use of this software
must display the following acknowledgement with the name(s) of the
authors as specified in the copyright notice(s) substituted where
indicated:
This product includes software developed by the authors which are
mentioned at the start of the source files and other contributors.
5. Neither the name(s) of the author(s) nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY ITS AUTHORS AND CONTRIBUTORS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1760,7 +1760,7 @@ pulls.no_merge_desc=Dieser Pull-Request kann nicht gemerged werden, da keine Mer
pulls.no_merge_helper=Aktiviere Mergeoptionen in den Repositoryeinstellungen oder merge den Pull-Request manuell.
pulls.no_merge_wip=Dieser Pull Request kann nicht gemergt werden, da er als Work In Progress gekennzeichnet ist.
pulls.no_merge_not_ready=Dieser Pull-Request kann nicht gemergt werden, überprüfe den Reviewstatus und die Statusprüfungen.
pulls.no_merge_access=Du bist nicht berechtigt, diesen Pull-Request zu Mergen.
pulls.no_merge_access=Du bist nicht berechtigt, diesen Pull-Request zu mergen.
pulls.merge_pull_request=Merge Commit erstellen
pulls.rebase_merge_pull_request=Rebasen und dann fast-forwarden
pulls.rebase_merge_commit_pull_request=Rebasen und dann mergen

View File

@ -2312,7 +2312,7 @@ settings.dismiss_stale_approvals_desc = When new commits that change the content
settings.require_signed_commits = Require Signed Commits
settings.require_signed_commits_desc = Reject pushes to this branch if they are unsigned or unverifiable.
settings.protect_branch_name_pattern = Protected Branch Name Pattern
settings.protect_branch_name_pattern_desc = "Protected branch name patterns. See <a href="github.com/gobwas/glob">the documentation</a> for pattern syntax. Examples: main, release/**"
settings.protect_branch_name_pattern_desc = "Protected branch name patterns. See <a href="https://github.com/gobwas/glob">the documentation</a> for pattern syntax. Examples: main, release/**"
settings.protect_patterns = Patterns
settings.protect_protected_file_patterns = "Protected file patterns (separated using semicolon ';'):"
settings.protect_protected_file_patterns_desc = "Protected files are not allowed to be changed directly even if user has rights to add, edit, or delete files in this branch. Multiple patterns can be separated using semicolon (';'). See <a href='https://pkg.go.dev/github.com/gobwas/glob#Compile'>github.com/gobwas/glob</a> documentation for pattern syntax. Examples: <code>.drone.yml</code>, <code>/docs/**/*.txt</code>."
@ -2883,6 +2883,7 @@ packages.package_manage_panel = Package Management
packages.total_size = Total Size: %s
packages.unreferenced_size = Unreferenced Size: %s
packages.cleanup = Clean up expired data
packages.cleanup.success = Cleaned up expired data successfully
packages.owner = Owner
packages.creator = Creator
packages.name = Name
@ -3528,7 +3529,11 @@ runs.status = Status
runs.actors_no_select = All actors
runs.status_no_select = All status
runs.no_results = No results matched.
runs.no_workflows = There are no workflows yet.
runs.no_workflows.quick_start = Don't know how to start with Gitea Action? See <a target="_blank" rel="noopener noreferrer" href="%s">the quick start guide</a>.
runs.no_workflows.documentation = For more information on the Gitea Action, see <a target="_blank" rel="noopener noreferrer" href="%s">the documentation</a>.
runs.no_runs = The workflow has no runs yet.
runs.empty_commit_message = (empty commit message)
workflow.disable = Disable Workflow
workflow.disable_success = Workflow '%s' disabled successfully.

View File

@ -1330,9 +1330,9 @@ projects.column.new_title=Nom
projects.column.new_submit=Créer une colonne
projects.column.new=Nouvelle colonne
projects.column.set_default=Définir par défaut
projects.column.set_default_desc=Missionne cette colonne d'accueillir les tickets et demande d'ajouts non catégorisés.
projects.column.set_default_desc=Les tickets et demandes d’ajout non-catégorisés seront placés dans cette colonne.
projects.column.unset_default=Défaire par défaut
projects.column.unset_default_desc=Décharge cette colonne d'accueillir les tickets et demandes d'ajouts non catégorisées. Ceux-ci iront dans une colonne idoine.
projects.column.unset_default_desc=Les tickets et demandes d'ajouts non-catégorisés seront placés dans une colonne idoine.
projects.column.delete=Supprimer la colonne
projects.column.deletion_desc=La suppression d'une colonne de projet déplace tous les tickets liés à 'Non catégorisé'. Continuer ?
projects.column.color=Couleur
@ -3214,22 +3214,22 @@ notices.delete_success=Les informations systèmes ont été supprimées.
[action]
create_repo=a créé le dépôt <a href="%s">%s</a>
rename_repo=a rebaptisé le dépôt de <code>%[1]s</code> vers <a href="%[2]s">%[3]s</a>
commit_repo=a soumis sur <a href="%[2]s">%[3]s</a> à <a href="%[1]s">%[4]s</a>
create_issue=`ticket ouvert <a href="%[1]s">%[3]s#%[2]s</a>`
close_issue=`ticket fermé <a href="%[1]s">%[3]s#%[2]s</a>`
rename_repo=a rebaptisé le dépôt <s><code>%[1]s</code></s> en <a href="%[2]s">%[3]s</a>
commit_repo=a soumis sur <a href="%[2]s">%[3]s</a> dans <a href="%[1]s">%[4]s</a>
create_issue=`a ouvert le ticket <a href="%[1]s">%[3]s#%[2]s</a>`
close_issue=`a fermé le ticket <a href="%[1]s">%[3]s#%[2]s</a>`
reopen_issue=`a réouvert le ticket <a href="%[1]s">%[3]s#%[2]s</a>`
create_pull_request=`a créé la demande d'ajout <a href="%[1]s">%[3]s#%[2]s</a>`
close_pull_request=`a fermé la demande d'ajout <a href="%[1]s">%[3]s#%[2]s</a>`
reopen_pull_request=`a réouvert la demande d'ajout <a href="%[1]s">%[3]s#%[2]s</a>`
create_pull_request=`a créé la demande d’ajout <a href="%[1]s">%[3]s#%[2]s</a>`
close_pull_request=`a fermé la demande d’ajout <a href="%[1]s">%[3]s#%[2]s</a>`
reopen_pull_request=`a réouvert la demande d’ajout <a href="%[1]s">%[3]s#%[2]s</a>`
comment_issue=`a commenté le ticket <a href="%[1]s">%[3]s#%[2]s</a>`
comment_pull=`a commenté la demande d'ajout <a href="%[1]s">%[3]s#%[2]s</a>`
merge_pull_request=`a fusionné la demande d'ajout <a href="%[1]s">%[3]s#%[2]s</a>`
auto_merge_pull_request=`a fusionné automatiquement la demande d’ajout <a href="%[1]s">%[3]s#%[2]s</a>.`
transfer_repo=a transféré le dépôt <code>%s</code> à <a href="%s">%s</a>
push_tag=a poussé l'étiquette <a href="%[2]s">%[3]s</a> vers <a href="%[1]s">%[4]s</a>
delete_tag=étiquette supprimée %[2]s de <a href="%[1]s">%[3]s</a>
delete_branch=branche %[2]s supprimée de <a href="%[1]s">%[3]s</a>
comment_pull=`a commenté la demande d’ajout <a href="%[1]s">%[3]s#%[2]s</a>`
merge_pull_request=`a fusionné la demande d’ajout <a href="%[1]s">%[3]s#%[2]s</a>`
auto_merge_pull_request=`a fusionné automatiquement la demande d’ajout <a href="%[1]s">%[3]s#%[2]s</a>`
transfer_repo=a transféré le dépôt <code>%s</code> vers <a href="%s">%s</a>
push_tag=a poussé l’étiquette <a href="%[2]s">%[3]s</a> vers <a href="%[1]s">%[4]s</a>
delete_tag=a supprimé l’étiquette %[2]s de <a href="%[1]s">%[3]s</a>
delete_branch=a supprimé la branche %[2]s de <a href="%[1]s">%[3]s</a>
compare_branch=Comparer
compare_commits=Comparer %d révisions
compare_commits_general=Comparer les révisions
@ -3242,8 +3242,8 @@ publish_release=`a publié <a href="%[2]s"> "%[4]s" </a> à <a href="%[1]s">%[3]
review_dismissed=`a révoqué l’évaluation de <b>%[4]s</b> sur <a href="%[1]s">%[3]s#%[2]s</a>.`
review_dismissed_reason=Raison :
create_branch=a créé la branche <a href="%[2]s">%[3]s</a> dans <a href="%[1]s">%[4]s</a>.
starred_repo=est fan de <a href="%[1]s">%[2]s</a>.
watched_repo=observe <a href="%[1]s">%[2]s</a>.
starred_repo=aime <a href="%[1]s">%[2]s</a>
watched_repo=observe <a href="%[1]s">%[2]s</a>
[tool]
now=maintenant

View File

@ -3305,7 +3305,7 @@ error.unit_not_allowed=Não tem permissão para aceder a esta parte do repositó
title=Pacotes
desc=Gerir pacotes do repositório.
empty=Ainda não há pacotes.
empty.documentation=Para obter mais informação sobre o registo de pacotes, veja <a target="_blank" rel="noopener noreferrer" href="https://docs.gitea.io/en-us/usage/packages/overview/">a documentação</a>.
empty.documentation=Para obter mais informação sobre o registo de pacotes, veja <a target="_blank" rel="noopener noreferrer" href="%s">a documentação</a>.
empty.repo=Carregou um pacote mas este não é apresentado aqui? Vá às <a href="%[1]s">configurações do pacote</a> e ligue-o a este repositório.
registry.documentation=Para mais informação sobre o registo %s, veja <a target="_blank" rel="noopener noreferrer" href="%s">a documentação</a>.
filter.type=Tipo

229
package-lock.json generated
View File

@ -12,7 +12,7 @@
"@claviska/jquery-minicolors": "2.3.6",
"@github/markdown-toolbar-element": "2.2.1",
"@github/relative-time-element": "4.3.0",
"@github/text-expander-element": "2.5.0",
"@github/text-expander-element": "2.6.1",
"@mcaptcha/vanilla-glue": "0.1.0-alpha-3",
"@primer/octicons": "19.8.0",
"@webcomponents/custom-elements": "1.6.0",
@ -25,12 +25,12 @@
"easymde": "2.18.0",
"esbuild-loader": "4.0.2",
"escape-goat": "4.0.0",
"fast-glob": "3.3.1",
"fast-glob": "3.3.2",
"jquery": "3.7.1",
"katex": "0.16.9",
"license-checker-webpack-plugin": "0.2.1",
"lightningcss-loader": "2.1.0",
"mermaid": "10.6.0",
"mermaid": "10.6.1",
"mini-css-extract-plugin": "2.7.6",
"minimatch": "9.0.3",
"monaco-editor": "0.44.0",
@ -38,14 +38,14 @@
"pdfobject": "2.2.12",
"pretty-ms": "8.0.0",
"sortablejs": "1.15.0",
"swagger-ui-dist": "5.9.1",
"swagger-ui-dist": "5.10.0",
"throttle-debounce": "5.0.0",
"tinycolor2": "1.6.0",
"tippy.js": "6.3.7",
"toastify-js": "1.12.0",
"tributejs": "5.1.3",
"uint8-to-base64": "0.2.0",
"vue": "3.3.7",
"vue": "3.3.8",
"vue-bar-graph": "2.0.0",
"vue-loader": "17.3.1",
"vue3-calendar-heatmap": "2.0.5",
@ -55,11 +55,11 @@
},
"devDependencies": {
"@eslint-community/eslint-plugin-eslint-comments": "4.1.0",
"@playwright/test": "1.39.0",
"@playwright/test": "1.40.0",
"@stoplight/spectral-cli": "6.11.0",
"@stylistic/eslint-plugin-js": "1.0.0",
"@vitejs/plugin-vue": "4.4.0",
"eslint": "8.53.0",
"@stylistic/eslint-plugin-js": "1.4.0",
"@vitejs/plugin-vue": "4.5.0",
"eslint": "8.54.0",
"eslint-plugin-array-func": "4.0.0",
"eslint-plugin-i": "2.29.0",
"eslint-plugin-jquery": "1.5.1",
@ -80,7 +80,7 @@
"stylelint-declaration-block-no-ignored-properties": "2.7.0",
"stylelint-declaration-strict-value": "1.9.2",
"stylelint-stylistic": "0.4.3",
"svgo": "3.0.2",
"svgo": "3.0.4",
"updates": "15.0.4",
"vite-string-plugin": "1.1.2",
"vitest": "0.34.6"
@ -286,9 +286,9 @@
}
},
"node_modules/@babel/parser": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz",
"integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==",
"version": "7.23.3",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.3.tgz",
"integrity": "sha512-uVsWNvlVsIninV2prNz/3lHCb+5CJ+e+IUBfbjToAHODtfGYLfCFuY4AU7TskI+dAKk+njsPiBjq1gKTvZOBaw==",
"bin": {
"parser": "bin/babel-parser.js"
},
@ -987,9 +987,9 @@
}
},
"node_modules/@eslint/js": {
"version": "8.53.0",
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.53.0.tgz",
"integrity": "sha512-Kn7K8dx/5U6+cT1yEhpX1w4PCSg0M+XyRILPgvwcEBjerFWCwQj5sbr3/VmxqV0JGHCBCzyd6LxypEuehypY1w==",
"version": "8.54.0",
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.54.0.tgz",
"integrity": "sha512-ut5V+D+fOoWPgGGNj83GGjnntO39xDy6DWxO0wb7Jp3DcMX0TfIqdzHF85VTQkerdyGmuuMD9AKAo5KiNlf/AQ==",
"dev": true,
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
@ -1011,9 +1011,9 @@
"integrity": "sha512-+tFjX9//HRS1HnBa5cNgfEtE52arwiutYg1TOF+Trk40SPxst9Q8Rtc3BKD6aKsvfbtub68vfhipgchGjj9o7g=="
},
"node_modules/@github/text-expander-element": {
"version": "2.5.0",
"resolved": "https://registry.npmjs.org/@github/text-expander-element/-/text-expander-element-2.5.0.tgz",
"integrity": "sha512-BjCxTshkUCgNXo/8iXUSK1sJ7kMJqhVsw6LAzIFtgaYrm4q2068WnPKjngfR+/QPhxN1nSvgd7CozblEIYjUZA==",
"version": "2.6.1",
"resolved": "https://registry.npmjs.org/@github/text-expander-element/-/text-expander-element-2.6.1.tgz",
"integrity": "sha512-i6krPGXJRABfKXut0WArFd365Je4PT0MljtDoXUoCOEp+lGrmdosDMxmO0EfOYc97jBn+Hd2XO1mMsuI5+fwmQ==",
"dependencies": {
"@github/combobox-nav": "^2.0.2"
}
@ -1349,12 +1349,12 @@
}
},
"node_modules/@playwright/test": {
"version": "1.39.0",
"resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.39.0.tgz",
"integrity": "sha512-3u1iFqgzl7zr004bGPYiN/5EZpRUSFddQBra8Rqll5N0/vfpqlP9I9EXqAoGacuAbX6c9Ulg/Cjqglp5VkK6UQ==",
"version": "1.40.0",
"resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.40.0.tgz",
"integrity": "sha512-PdW+kn4eV99iP5gxWNSDQCbhMaDVej+RXL5xr6t04nbKLCBwYtA046t7ofoczHOm8u6c+45hpDKQVZqtqwkeQg==",
"dev": true,
"dependencies": {
"playwright": "1.39.0"
"playwright": "1.40.0"
},
"bin": {
"playwright": "cli.js"
@ -1882,17 +1882,15 @@
"dev": true
},
"node_modules/@stylistic/eslint-plugin-js": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/@stylistic/eslint-plugin-js/-/eslint-plugin-js-1.0.0.tgz",
"integrity": "sha512-xxvjyYnUEgjBTnXKYMk6JbU0LHkf269d6y4IgW69bK/VwHrqLfdgE6mYvft42U7KVpp6Tbf6Z64tLRYD/rYd/A==",
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/@stylistic/eslint-plugin-js/-/eslint-plugin-js-1.4.0.tgz",
"integrity": "sha512-cANyn4ECWu8kxPmBM4K/Q4WocD3JbA0POmGbA2lJ4tynPE8jGyKpfP8SZj6BIidXV0pkyqvxEfaKppB4D16UsA==",
"dev": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.4.0",
"acorn": "^8.10.0",
"acorn": "^8.11.2",
"escape-string-regexp": "^4.0.0",
"eslint-visitor-keys": "^3.4.3",
"espree": "^9.6.1",
"esutils": "^2.0.3",
"graphemer": "^1.4.0"
}
},
@ -2180,15 +2178,15 @@
"dev": true
},
"node_modules/@vitejs/plugin-vue": {
"version": "4.4.0",
"resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-4.4.0.tgz",
"integrity": "sha512-xdguqb+VUwiRpSg+nsc2HtbAUSGak25DXYvpQQi4RVU1Xq1uworyoH/md9Rfd8zMmPR/pSghr309QNcftUVseg==",
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-4.5.0.tgz",
"integrity": "sha512-a2WSpP8X8HTEww/U00bU4mX1QpLINNuz/2KMNpLsdu3BzOpak3AGI1CJYBTXcc4SPhaD0eNRUp7IyQK405L5dQ==",
"dev": true,
"engines": {
"node": "^14.18.0 || >=16.0.0"
},
"peerDependencies": {
"vite": "^4.0.0",
"vite": "^4.0.0 || ^5.0.0",
"vue": "^3.2.25"
}
},
@ -2300,36 +2298,36 @@
}
},
"node_modules/@vue/compiler-core": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.3.7.tgz",
"integrity": "sha512-pACdY6YnTNVLXsB86YD8OF9ihwpolzhhtdLVHhBL6do/ykr6kKXNYABRtNMGrsQXpEXXyAdwvWWkuTbs4MFtPQ==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.3.8.tgz",
"integrity": "sha512-hN/NNBUECw8SusQvDSqqcVv6gWq8L6iAktUR0UF3vGu2OhzRqcOiAno0FmBJWwxhYEXRlQJT5XnoKsVq1WZx4g==",
"dependencies": {
"@babel/parser": "^7.23.0",
"@vue/shared": "3.3.7",
"@vue/shared": "3.3.8",
"estree-walker": "^2.0.2",
"source-map-js": "^1.0.2"
}
},
"node_modules/@vue/compiler-dom": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.3.7.tgz",
"integrity": "sha512-0LwkyJjnUPssXv/d1vNJ0PKfBlDoQs7n81CbO6Q0zdL7H1EzqYRrTVXDqdBVqro0aJjo/FOa1qBAPVI4PGSHBw==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.3.8.tgz",
"integrity": "sha512-+PPtv+p/nWDd0AvJu3w8HS0RIm/C6VGBIRe24b9hSyNWOAPEUosFZ5diwawwP8ip5sJ8n0Pe87TNNNHnvjs0FQ==",
"dependencies": {
"@vue/compiler-core": "3.3.7",
"@vue/shared": "3.3.7"
"@vue/compiler-core": "3.3.8",
"@vue/shared": "3.3.8"
}
},
"node_modules/@vue/compiler-sfc": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.3.7.tgz",
"integrity": "sha512-7pfldWy/J75U/ZyYIXRVqvLRw3vmfxDo2YLMwVtWVNew8Sm8d6wodM+OYFq4ll/UxfqVr0XKiVwti32PCrruAw==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.3.8.tgz",
"integrity": "sha512-WMzbUrlTjfYF8joyT84HfwwXo+8WPALuPxhy+BZ6R4Aafls+jDBnSz8PDz60uFhuqFbl3HxRfxvDzrUf3THwpA==",
"dependencies": {
"@babel/parser": "^7.23.0",
"@vue/compiler-core": "3.3.7",
"@vue/compiler-dom": "3.3.7",
"@vue/compiler-ssr": "3.3.7",
"@vue/reactivity-transform": "3.3.7",
"@vue/shared": "3.3.7",
"@vue/compiler-core": "3.3.8",
"@vue/compiler-dom": "3.3.8",
"@vue/compiler-ssr": "3.3.8",
"@vue/reactivity-transform": "3.3.8",
"@vue/shared": "3.3.8",
"estree-walker": "^2.0.2",
"magic-string": "^0.30.5",
"postcss": "^8.4.31",
@ -2348,30 +2346,30 @@
}
},
"node_modules/@vue/compiler-ssr": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.3.7.tgz",
"integrity": "sha512-TxOfNVVeH3zgBc82kcUv+emNHo+vKnlRrkv8YvQU5+Y5LJGJwSNzcmLUoxD/dNzv0bhQ/F0s+InlgV0NrApJZg==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.3.8.tgz",
"integrity": "sha512-hXCqQL/15kMVDBuoBYpUnSYT8doDNwsjvm3jTefnXr+ytn294ySnT8NlsFHmTgKNjwpuFy7XVV8yTeLtNl/P6w==",
"dependencies": {
"@vue/compiler-dom": "3.3.7",
"@vue/shared": "3.3.7"
"@vue/compiler-dom": "3.3.8",
"@vue/shared": "3.3.8"
}
},
"node_modules/@vue/reactivity": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.3.7.tgz",
"integrity": "sha512-cZNVjWiw00708WqT0zRpyAgduG79dScKEPYJXq2xj/aMtk3SKvL3FBt2QKUlh6EHBJ1m8RhBY+ikBUzwc7/khg==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.3.8.tgz",
"integrity": "sha512-ctLWitmFBu6mtddPyOKpHg8+5ahouoTCRtmAHZAXmolDtuZXfjL2T3OJ6DL6ezBPQB1SmMnpzjiWjCiMYmpIuw==",
"dependencies": {
"@vue/shared": "3.3.7"
"@vue/shared": "3.3.8"
}
},
"node_modules/@vue/reactivity-transform": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/@vue/reactivity-transform/-/reactivity-transform-3.3.7.tgz",
"integrity": "sha512-APhRmLVbgE1VPGtoLQoWBJEaQk4V8JUsqrQihImVqKT+8U6Qi3t5ATcg4Y9wGAPb3kIhetpufyZ1RhwbZCIdDA==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/@vue/reactivity-transform/-/reactivity-transform-3.3.8.tgz",
"integrity": "sha512-49CvBzmZNtcHua0XJ7GdGifM8GOXoUMOX4dD40Y5DxI3R8OUhMlvf2nvgUAcPxaXiV5MQQ1Nwy09ADpnLQUqRw==",
"dependencies": {
"@babel/parser": "^7.23.0",
"@vue/compiler-core": "3.3.7",
"@vue/shared": "3.3.7",
"@vue/compiler-core": "3.3.8",
"@vue/shared": "3.3.8",
"estree-walker": "^2.0.2",
"magic-string": "^0.30.5"
}
@ -2388,40 +2386,40 @@
}
},
"node_modules/@vue/runtime-core": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.3.7.tgz",
"integrity": "sha512-LHq9du3ubLZFdK/BP0Ysy3zhHqRfBn80Uc+T5Hz3maFJBGhci1MafccnL3rpd5/3wVfRHAe6c+PnlO2PAavPTQ==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.3.8.tgz",
"integrity": "sha512-qurzOlb6q26KWQ/8IShHkMDOuJkQnQcTIp1sdP4I9MbCf9FJeGVRXJFr2mF+6bXh/3Zjr9TDgURXrsCr9bfjUw==",
"dependencies": {
"@vue/reactivity": "3.3.7",
"@vue/shared": "3.3.7"
"@vue/reactivity": "3.3.8",
"@vue/shared": "3.3.8"
}
},
"node_modules/@vue/runtime-dom": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.3.7.tgz",
"integrity": "sha512-PFQU1oeJxikdDmrfoNQay5nD4tcPNYixUBruZzVX/l0eyZvFKElZUjW4KctCcs52nnpMGO6UDK+jF5oV4GT5Lw==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.3.8.tgz",
"integrity": "sha512-Noy5yM5UIf9UeFoowBVgghyGGPIDPy1Qlqt0yVsUdAVbqI8eeMSsTqBtauaEoT2UFXUk5S64aWVNJN4MJ2vRdA==",
"dependencies": {
"@vue/runtime-core": "3.3.7",
"@vue/shared": "3.3.7",
"@vue/runtime-core": "3.3.8",
"@vue/shared": "3.3.8",
"csstype": "^3.1.2"
}
},
"node_modules/@vue/server-renderer": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.3.7.tgz",
"integrity": "sha512-UlpKDInd1hIZiNuVVVvLgxpfnSouxKQOSE2bOfQpBuGwxRV/JqqTCyyjXUWiwtVMyeRaZhOYYqntxElk8FhBhw==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.3.8.tgz",
"integrity": "sha512-zVCUw7RFskvPuNlPn/8xISbrf0zTWsTSdYTsUTN1ERGGZGVnRxM2QZ3x1OR32+vwkkCm0IW6HmJ49IsPm7ilLg==",
"dependencies": {
"@vue/compiler-ssr": "3.3.7",
"@vue/shared": "3.3.7"
"@vue/compiler-ssr": "3.3.8",
"@vue/shared": "3.3.8"
},
"peerDependencies": {
"vue": "3.3.7"
"vue": "3.3.8"
}
},
"node_modules/@vue/shared": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.3.7.tgz",
"integrity": "sha512-N/tbkINRUDExgcPTBvxNkvHGu504k8lzlNQRITVnm6YjOjwa4r0nnbd4Jb01sNpur5hAllyRJzSK5PvB9PPwRg=="
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.3.8.tgz",
"integrity": "sha512-8PGwybFwM4x8pcfgqEQFy70NaQxASvOC5DJwLQfpArw1UDfUXrJkdxD3BhVTMS+0Lef/TU7YO0Jvr0jJY8T+mw=="
},
"node_modules/@webassemblyjs/ast": {
"version": "1.11.6",
@ -4741,15 +4739,15 @@
}
},
"node_modules/eslint": {
"version": "8.53.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-8.53.0.tgz",
"integrity": "sha512-N4VuiPjXDUa4xVeV/GC/RV3hQW9Nw+Y463lkWaKKXKYMvmRiRDAtfpuPFLN+E1/6ZhyR8J2ig+eVREnYgUsiag==",
"version": "8.54.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-8.54.0.tgz",
"integrity": "sha512-NY0DfAkM8BIZDVl6PgSa1ttZbx3xHgJzSNJKYcQglem6CppHyMhRIQkBVSSMaSRnLhig3jsDbEzOjwCVt4AmmA==",
"dev": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.6.1",
"@eslint/eslintrc": "^2.1.3",
"@eslint/js": "8.53.0",
"@eslint/js": "8.54.0",
"@humanwhocodes/config-array": "^0.11.13",
"@humanwhocodes/module-importer": "^1.0.1",
"@nodelib/fs.walk": "^1.2.8",
@ -5298,9 +5296,9 @@
"dev": true
},
"node_modules/fast-glob": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz",
"integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==",
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz",
"integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==",
"dependencies": {
"@nodelib/fs.stat": "^2.0.2",
"@nodelib/fs.walk": "^1.2.3",
@ -7621,9 +7619,9 @@
}
},
"node_modules/mermaid": {
"version": "10.6.0",
"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.6.0.tgz",
"integrity": "sha512-Hcti+Q2NiWnb2ZCijSX89Bn2i7TCUwosBdIn/d+u63Sz7y40XU6EKMctT4UX4qZuZGfKGZpfOeim2/KTrdR7aQ==",
"version": "10.6.1",
"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.6.1.tgz",
"integrity": "sha512-Hky0/RpOw/1il9X8AvzOEChfJtVvmXm+y7JML5C//ePYMy0/9jCEmW1E1g86x9oDfW9+iVEdTV/i+M6KWRNs4A==",
"dependencies": {
"@braintree/sanitize-url": "^6.0.1",
"@types/d3-scale": "^4.0.3",
@ -8742,12 +8740,12 @@
"dev": true
},
"node_modules/playwright": {
"version": "1.39.0",
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.39.0.tgz",
"integrity": "sha512-naE5QT11uC/Oiq0BwZ50gDmy8c8WLPRTEWuSSFVG2egBka/1qMoSqYQcROMT9zLwJ86oPofcTH2jBY/5wWOgIw==",
"version": "1.40.0",
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.40.0.tgz",
"integrity": "sha512-gyHAgQjiDf1m34Xpwzaqb76KgfzYrhK7iih+2IzcOCoZWr/8ZqmdBw+t0RU85ZmfJMgtgAiNtBQ/KS2325INXw==",
"dev": true,
"dependencies": {
"playwright-core": "1.39.0"
"playwright-core": "1.40.0"
},
"bin": {
"playwright": "cli.js"
@ -8760,9 +8758,9 @@
}
},
"node_modules/playwright-core": {
"version": "1.39.0",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.39.0.tgz",
"integrity": "sha512-+k4pdZgs1qiM+OUkSjx96YiKsXsmb59evFoqv8SKO067qBA+Z2s/dCzJij/ZhdQcs2zlTAgRKfeiiLm8PQ2qvw==",
"version": "1.40.0",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.40.0.tgz",
"integrity": "sha512-fvKewVJpGeca8t0ipM56jkVSU6Eo0RmFvQ/MaCQNDYm+sdvKkMBBWTE1FdeMqIdumRaXXjZChWHvIzCGM/tA/Q==",
"dev": true,
"bin": {
"playwright-core": "cli.js"
@ -10303,16 +10301,17 @@
"dev": true
},
"node_modules/svgo": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/svgo/-/svgo-3.0.2.tgz",
"integrity": "sha512-Z706C1U2pb1+JGP48fbazf3KxHrWOsLme6Rv7imFBn5EnuanDW1GPaA/P1/dvObE670JDePC3mnj0k0B7P0jjQ==",
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/svgo/-/svgo-3.0.4.tgz",
"integrity": "sha512-T+Xul3JwuJ6VGXKo/p2ndqx1ibxNKnLTvRc1ZTWKCfyKS/GgNjRZcYsK84fxTsy/izr91g/Rwx6fGnVgaFSI5g==",
"dev": true,
"dependencies": {
"@trysound/sax": "0.2.0",
"commander": "^7.2.0",
"css-select": "^5.1.0",
"css-tree": "^2.2.1",
"csso": "^5.0.5",
"css-what": "^6.1.0",
"csso": "5.0.5",
"picocolors": "^1.0.0"
},
"bin": {
@ -10336,9 +10335,9 @@
}
},
"node_modules/swagger-ui-dist": {
"version": "5.9.1",
"resolved": "https://registry.npmjs.org/swagger-ui-dist/-/swagger-ui-dist-5.9.1.tgz",
"integrity": "sha512-5zAx+hUwJb9T3EAntc7TqYkV716CMqG6sZpNlAAMOMWkNXRYxGkN8ADIvD55dQZ10LxN90ZM/TQmN7y1gpICnw=="
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/swagger-ui-dist/-/swagger-ui-dist-5.10.0.tgz",
"integrity": "sha512-PBTn5qDOQVtU29hrx74km86SnK3/mFtF3grI98y575y1aRpxiuStRTIvsfXFudPFkLofHU7H9a+fKrP+Oayc3g=="
},
"node_modules/symbol-tree": {
"version": "3.2.4",
@ -11512,15 +11511,15 @@
}
},
"node_modules/vue": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/vue/-/vue-3.3.7.tgz",
"integrity": "sha512-YEMDia1ZTv1TeBbnu6VybatmSteGOS3A3YgfINOfraCbf85wdKHzscD6HSS/vB4GAtI7sa1XPX7HcQaJ1l24zA==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/vue/-/vue-3.3.8.tgz",
"integrity": "sha512-5VSX/3DabBikOXMsxzlW8JyfeLKlG9mzqnWgLQLty88vdZL7ZJgrdgBOmrArwxiLtmS+lNNpPcBYqrhE6TQW5w==",
"dependencies": {
"@vue/compiler-dom": "3.3.7",
"@vue/compiler-sfc": "3.3.7",
"@vue/runtime-dom": "3.3.7",
"@vue/server-renderer": "3.3.7",
"@vue/shared": "3.3.7"
"@vue/compiler-dom": "3.3.8",
"@vue/compiler-sfc": "3.3.8",
"@vue/runtime-dom": "3.3.8",
"@vue/server-renderer": "3.3.8",
"@vue/shared": "3.3.8"
},
"peerDependencies": {
"typescript": "*"

View File

@ -11,7 +11,7 @@
"@claviska/jquery-minicolors": "2.3.6",
"@github/markdown-toolbar-element": "2.2.1",
"@github/relative-time-element": "4.3.0",
"@github/text-expander-element": "2.5.0",
"@github/text-expander-element": "2.6.1",
"@mcaptcha/vanilla-glue": "0.1.0-alpha-3",
"@primer/octicons": "19.8.0",
"@webcomponents/custom-elements": "1.6.0",
@ -24,12 +24,12 @@
"easymde": "2.18.0",
"esbuild-loader": "4.0.2",
"escape-goat": "4.0.0",
"fast-glob": "3.3.1",
"fast-glob": "3.3.2",
"jquery": "3.7.1",
"katex": "0.16.9",
"license-checker-webpack-plugin": "0.2.1",
"lightningcss-loader": "2.1.0",
"mermaid": "10.6.0",
"mermaid": "10.6.1",
"mini-css-extract-plugin": "2.7.6",
"minimatch": "9.0.3",
"monaco-editor": "0.44.0",
@ -37,14 +37,14 @@
"pdfobject": "2.2.12",
"pretty-ms": "8.0.0",
"sortablejs": "1.15.0",
"swagger-ui-dist": "5.9.1",
"swagger-ui-dist": "5.10.0",
"throttle-debounce": "5.0.0",
"tinycolor2": "1.6.0",
"tippy.js": "6.3.7",
"toastify-js": "1.12.0",
"tributejs": "5.1.3",
"uint8-to-base64": "0.2.0",
"vue": "3.3.7",
"vue": "3.3.8",
"vue-bar-graph": "2.0.0",
"vue-loader": "17.3.1",
"vue3-calendar-heatmap": "2.0.5",
@ -54,11 +54,11 @@
},
"devDependencies": {
"@eslint-community/eslint-plugin-eslint-comments": "4.1.0",
"@playwright/test": "1.39.0",
"@playwright/test": "1.40.0",
"@stoplight/spectral-cli": "6.11.0",
"@stylistic/eslint-plugin-js": "1.0.0",
"@vitejs/plugin-vue": "4.4.0",
"eslint": "8.53.0",
"@stylistic/eslint-plugin-js": "1.4.0",
"@vitejs/plugin-vue": "4.5.0",
"eslint": "8.54.0",
"eslint-plugin-array-func": "4.0.0",
"eslint-plugin-i": "2.29.0",
"eslint-plugin-jquery": "1.5.1",
@ -79,7 +79,7 @@
"stylelint-declaration-block-no-ignored-properties": "2.7.0",
"stylelint-declaration-strict-value": "1.9.2",
"stylelint-stylistic": "0.4.3",
"svgo": "3.0.2",
"svgo": "3.0.4",
"updates": "15.0.4",
"vite-string-plugin": "1.1.2",
"vitest": "0.34.6"

30
poetry.lock generated
View File

@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.7.0 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
[[package]]
name = "click"
@ -27,12 +27,12 @@ files = [
[[package]]
name = "cssbeautifier"
version = "1.14.9"
version = "1.14.11"
description = "CSS unobfuscator and beautifier."
optional = false
python-versions = "*"
files = [
{file = "cssbeautifier-1.14.9.tar.gz", hash = "sha256:2da432472f68170eb854aff97b16a24721f5090ee36af2e31199590a89e7f71f"},
{file = "cssbeautifier-1.14.11.tar.gz", hash = "sha256:40544c2b62bbcb64caa5e7f37a02df95654e5ce1bcacadac4ca1f3dc89c31513"},
]
[package.dependencies]
@ -100,12 +100,12 @@ files = [
[[package]]
name = "jsbeautifier"
version = "1.14.9"
version = "1.14.11"
description = "JavaScript unobfuscator and beautifier."
optional = false
python-versions = "*"
files = [
{file = "jsbeautifier-1.14.9.tar.gz", hash = "sha256:c738ebc36b47bd94e4ca6dd17a9004c3cc74edad582ca1d60e0e5d5945a63cb9"},
{file = "jsbeautifier-1.14.11.tar.gz", hash = "sha256:6b632581ea60dd1c133cd25a48ad187b4b91f526623c4b0fb5443ef805250505"},
]
[package.dependencies]
@ -149,6 +149,7 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
{file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@ -156,8 +157,15 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
{file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
{file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
@ -174,6 +182,7 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
{file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@ -181,6 +190,7 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
{file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@ -327,13 +337,13 @@ telegram = ["requests"]
[[package]]
name = "yamllint"
version = "1.32.0"
version = "1.33.0"
description = "A linter for YAML files."
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "yamllint-1.32.0-py3-none-any.whl", hash = "sha256:d97a66e48da820829d96077d76b8dfbe6c6140f106e558dae87e81ac4e6b30b7"},
{file = "yamllint-1.32.0.tar.gz", hash = "sha256:d01dde008c65de5b235188ab3110bebc59d18e5c65fc8a58267cd211cd9df34a"},
{file = "yamllint-1.33.0-py3-none-any.whl", hash = "sha256:28a19f5d68d28d8fec538a1db21bb2d84c7dc2e2ea36266da8d4d1c5a683814d"},
{file = "yamllint-1.33.0.tar.gz", hash = "sha256:2dceab9ef2d99518a2fcf4ffc964d44250ac4459be1ba3ca315118e4a1a81f7d"},
]
[package.dependencies]
@ -346,4 +356,4 @@ dev = ["doc8", "flake8", "flake8-import-order", "rstcheck[sphinx]", "sphinx"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
content-hash = "a2e2a44801f0da2cc5cc1d1a3f707853f51a4346fef725f98c67a074dcb223d6"
content-hash = "5bf456fd73e69c13f16d080503f6dbaf60294f15d74343c16ef5a57b5b345db1"

View File

@ -1 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="svg fontawesome-save" width="16" height="16" aria-hidden="true"><path d="m434 130-84-84a48 48 0 0 0-33.9-14H48A48 48 0 0 0 0 80v352a48 48 0 0 0 48 48h352a48 48 0 0 0 48-48V163.9a48 48 0 0 0-14-34zM224 416a64 64 0 1 1 0-128 64 64 0 0 1 0 128zm96-304.5V212a12 12 0 0 1-12 12H76a12 12 0 0 1-12-12V108a12 12 0 0 1 12-12h228.5a12 12 0 0 1 8.5 3.5l3.5 3.5a12 12 0 0 1 3.5 8.5z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="svg fontawesome-save" width="16" height="16" aria-hidden="true"><path d="m434 130-84-84a48 48 0 0 0-33.9-14H48A48 48 0 0 0 0 80v352a48 48 0 0 0 48 48h352a48 48 0 0 0 48-48V163.9a48 48 0 0 0-14-34zM224 416a64 64 0 1 1 0-128 64 64 0 0 1 0 128m96-304.5V212a12 12 0 0 1-12 12H76a12 12 0 0 1-12-12V108a12 12 0 0 1 12-12h228.5a12 12 0 0 1 8.5 3.5l3.5 3.5a12 12 0 0 1 3.5 8.5"/></svg>

Before: 448 B  |  After: 446 B

View File

@ -1 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 416 448" class="svg fontawesome-windows" width="16" height="16" aria-hidden="true"><path d="M170.5 251.5v162.75L0 390.75V251.5h170.5zm0-185.75V230.5H0V89.25zM416 251.5V448l-226.75-31.25V251.5H416zM416 32v198.5H189.25V63.25z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 416 448" class="svg fontawesome-windows" width="16" height="16" aria-hidden="true"><path d="M170.5 251.5v162.75L0 390.75V251.5zm0-185.75V230.5H0V89.25zM416 251.5V448l-226.75-31.25V251.5zM416 32v198.5H189.25V63.25z"/></svg>

Before: 285 B  |  After: 275 B

Some files were not shown because too many files have changed in this diff.