Merge branch 'main' into add-issue-planned-time

stuzer05, 2023-07-10 10:07:08 +03:00
commit fa662ec087
No known key found for this signature in database (GPG Key ID: A6ABAAA9268F9F4F)
688 changed files with 10306 additions and 5612 deletions

@ -22,13 +22,14 @@ plugins:
- eslint-plugin-wc
env:
es2022: true
es2024: true
node: true
globals:
__webpack_public_path__: true
overrides:
- files: ["web_src/**/*"]
globals:
__webpack_public_path__: true
process: false # https://github.com/webpack/webpack/issues/15833
- files: ["web_src/**/*", "docs/**/*"]
env:
browser: true
@ -155,7 +156,7 @@ rules:
import/no-restricted-paths: [0]
import/no-self-import: [2]
import/no-unassigned-import: [0]
import/no-unresolved: [2, {commonjs: true, ignore: ["\\?.+$"]}]
import/no-unresolved: [2, {commonjs: true, ignore: ["\\?.+$", "^vitest/"]}]
import/no-unused-modules: [2, {unusedExports: true}]
import/no-useless-path-segments: [2, {commonjs: true}]
import/no-webpack-loader-syntax: [2]
@ -692,7 +693,7 @@ rules:
unicorn/prefer-dom-node-remove: [2]
unicorn/prefer-dom-node-text-content: [2]
unicorn/prefer-event-target: [2]
unicorn/prefer-export-from: [2, {ignoreUsedVariables: true}]
unicorn/prefer-export-from: [0]
unicorn/prefer-includes: [2]
unicorn/prefer-json-parse-buffer: [0]
unicorn/prefer-keyboard-event-key: [2]

@ -1,6 +1,6 @@
name: Feature Request
description: Got an idea for a feature that Gitea doesn't have currently? Submit your idea here!
labels: ["kind/feature", "kind/proposal"]
labels: ["kind/proposal"]
body:
- type: markdown
attributes:

@ -15,6 +15,8 @@ on:
value: ${{ jobs.detect.outputs.templates }}
docker:
value: ${{ jobs.detect.outputs.docker }}
swagger:
value: ${{ jobs.detect.outputs.swagger }}
jobs:
detect:
@ -27,6 +29,7 @@ jobs:
actions: ${{ steps.changes.outputs.actions }}
templates: ${{ steps.changes.outputs.templates }}
docker: ${{ steps.changes.outputs.docker }}
swagger: ${{ steps.changes.outputs.swagger }}
steps:
- uses: actions/checkout@v3
- uses: dorny/paths-filter@v2
@ -36,6 +39,7 @@ jobs:
backend:
- "**/*.go"
- "templates/**/*.tmpl"
- "assets/emoji.json"
- "go.mod"
- "go.sum"
- "Makefile"
@ -43,6 +47,7 @@ jobs:
frontend:
- "**/*.js"
- "web_src/**"
- "assets/emoji.json"
- "package.json"
- "package-lock.json"
- "Makefile"
@ -63,3 +68,6 @@ jobs:
- "Dockerfile.rootless"
- "docker/**"
- "Makefile"
swagger:
- "templates/swagger/v1_json.tmpl"

@ -39,6 +39,18 @@ jobs:
- run: make deps-py
- run: make lint-templates
lint-swagger:
if: needs.files-changed.outputs.swagger == 'true'
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 20
- run: make deps-frontend
- run: make lint-swagger
lint-go-windows:
if: needs.files-changed.outputs.backend == 'true' || needs.files-changed.outputs.actions == 'true'
needs: files-changed

@ -77,16 +77,21 @@ linters-settings:
extra-rules: true
lang-version: "1.20"
depguard:
list-type: denylist
# Check the list against standard lib.
include-go-root: true
packages-with-error-message:
- encoding/json: "use gitea's modules/json instead of encoding/json"
- github.com/unknwon/com: "use gitea's util and replacements"
- io/ioutil: "use os or io instead"
- golang.org/x/exp: "it's experimental and unreliable."
- code.gitea.io/gitea/modules/git/internal: "do not use the internal package, use AddXxx function instead"
- gopkg.in/ini.v1: "do not use the ini package, use gitea's config system instead"
rules:
main:
deny:
- pkg: encoding/json
desc: use gitea's modules/json instead of encoding/json
- pkg: github.com/unknwon/com
desc: use gitea's util and replacements
- pkg: io/ioutil
desc: use os or io instead
- pkg: golang.org/x/exp
desc: it's experimental and unreliable
- pkg: code.gitea.io/gitea/modules/git/internal
desc: do not use the internal package, use AddXxx function instead
- pkg: gopkg.in/ini.v1
desc: do not use the ini package, use gitea's config system instead
issues:
max-issues-per-linter: 0

@ -50,7 +50,7 @@ rules:
declaration-no-important: null
declaration-property-max-values: null
declaration-property-unit-allowed-list: null
declaration-property-unit-disallowed-list: null
declaration-property-unit-disallowed-list: {line-height: [em]}
declaration-property-value-allowed-list: null
declaration-property-value-disallowed-list: null
declaration-property-value-no-unknown: true

@ -4,6 +4,54 @@ This changelog goes through all the changes that have been made in each release
without substantial changes to our git log; to see the highlights of what has
been added to each release, please refer to the [blog](https://blog.gitea.io).
## [1.19.4](https://github.com/go-gitea/gitea/releases/tag/v1.19.4) - 2023-07-04
* SECURITY
* Fix open redirect check for more cases (#25143) (#25155)
* API
* Return `404` in the API if the requested webhooks were not found (#24823) (#24830)
* Fix `organization` field being `null` in `GET /api/v1/teams/{id}` (#24694) (#24696)
* ENHANCEMENTS
* Set `--font-weight-bold` to 600 (#24840)
* Make mailer SMTP check have timed context (#24751) (#24759)
* Do not select line numbers when selecting text from the action run logs (#24594) (#24596)
* BUGFIXES
* Fix bug when changing user name (#25637) (#25645)
* Fix task list checkbox toggle to work with YAML front matter (#25184) (#25236)
* Hide limited users if viewed by anonymous ghost (#25214) (#25224)
* Add `WithPullRequest` for `actionsNotifier` (#25144) (#25196)
* Fix parallel index generation failure with MySQL (#24567) (#25081)
* GitLab migration: Sanitize response for reaction list (#25054) (#25059)
* Fix users cannot visit issue attachment bug (#25019) (#25027)
* Fix missing reference prefix of commits when syncing mirror repository (#24994)
* Only validate changed columns when updating user (#24867) (#24903)
* Make DeleteIssue use correct context (#24885)
* Fix topics deleted via API not being deleted in org page (#24825) (#24829)
* Fix Actions being enabled accidentally (#24802) (#24810)
* Fix missed table name on iterate lfs meta objects (#24768) (#24774)
* Fix safari cookie session bug (#24772)
* Respect original content when creating secrets (#24745) (#24746)
* Fix Pull Mirror out-of-sync bugs (#24732) (#24733)
* Fix run list broken when trigger user deleted (#24706) (#24709)
* Fix issues list page multiple selection update milestones (#24660) (#24663)
* Fix: release page for empty or non-existing target (#24659)
* Fix close org projects (#24588) (#24591)
* Refresh the reference of the closed PR when reopening (#24231) (#24587)
* Fix the permission of team's `Actions` unit issue (#24536) (#24545)
* Bump go.etcd.io/bbolt and blevesearch deps (#23062) (#24519)
* Fix new wiki page mirror (#24518)
* Match unqualified references when syncing pulls as well (#23070)
* DOCS
* Change branch name from master to main in some documents' links (#25126) (#25139)
* Remove unnecessary content on docs (#24976) (#25001)
* Unify doc links to use paths relative to doc folder (#24979) (#25000)
* Fix docs documenting invalid `@every` for `OLDER_THAN` cron settings (#24695) (#24698)
* MISC
* Merge different languages for language stats (#24900) (#24921)
* Hiding Secrets options when Actions feature is disabled (#24792)
* Improve decryption failure message (#24573) (#24575)
* Makefile: Use portable !, not GNUish -not, with find(1). (#24565) (#24572)
## [1.19.3](https://github.com/go-gitea/gitea/releases/tag/1.19.3) - 2023-05-03
* SECURITY

@ -52,3 +52,5 @@ Xinyi Gong <hestergong@gmail.com> (@HesterG)
wxiaoguang <wxiaoguang@gmail.com> (@wxiaoguang)
Gary Moon <gary@garymoon.net> (@garymoon)
Philip Peterson <philip.c.peterson@gmail.com> (@philip-peterson)
Denys Konovalov <kontakt@denyskon.de> (@denyskon)
Punit Inani <punitinani1@gmail.com> (@puni9869)

@ -25,17 +25,17 @@ COMMA := ,
XGO_VERSION := go-1.20.x
AIR_PACKAGE ?= github.com/cosmtrek/air@v1.43.0
AIR_PACKAGE ?= github.com/cosmtrek/air@v1.44.0
EDITORCONFIG_CHECKER_PACKAGE ?= github.com/editorconfig-checker/editorconfig-checker/cmd/editorconfig-checker@2.7.0
GOFUMPT_PACKAGE ?= mvdan.cc/gofumpt@v0.5.0
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.3
GXZ_PAGAGE ?= github.com/ulikunitz/xz/cmd/gxz@v0.5.11
MISSPELL_PACKAGE ?= github.com/client9/misspell/cmd/misspell@v0.3.4
SWAGGER_PACKAGE ?= github.com/go-swagger/go-swagger/cmd/swagger@v0.30.4
SWAGGER_PACKAGE ?= github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5
XGO_PACKAGE ?= src.techknowlogick.com/xgo@latest
GO_LICENSES_PACKAGE ?= github.com/google/go-licenses@v1.6.0
GOVULNCHECK_PACKAGE ?= golang.org/x/vuln/cmd/govulncheck@latest
ACTIONLINT_PACKAGE ?= github.com/rhysd/actionlint/cmd/actionlint@latest
GOVULNCHECK_PACKAGE ?= golang.org/x/vuln/cmd/govulncheck@v0.2.0
ACTIONLINT_PACKAGE ?= github.com/rhysd/actionlint/cmd/actionlint@v1.6.25
DOCKER_IMAGE ?= gitea/gitea
DOCKER_TAG ?= latest
@ -226,6 +226,8 @@ help:
@echo " - test-frontend test frontend files"
@echo " - test-backend test backend files"
@echo " - test-e2e[\#TestSpecificName] test end to end using playwright"
@echo " - update-js update js dependencies"
@echo " - update-py update py dependencies"
@echo " - webpack build webpack files"
@echo " - svg build svg files"
@echo " - fomantic build fomantic files"
@ -358,10 +360,10 @@ lint: lint-frontend lint-backend
lint-fix: lint-frontend-fix lint-backend-fix
.PHONY: lint-frontend
lint-frontend: lint-js lint-css lint-md lint-swagger
lint-frontend: lint-js lint-css
.PHONY: lint-frontend-fix
lint-frontend-fix: lint-js-fix lint-css-fix lint-md lint-swagger
lint-frontend-fix: lint-js-fix lint-css-fix
.PHONY: lint-backend
lint-backend: lint-go lint-go-vet lint-editorconfig
@ -924,13 +926,20 @@ node_modules: package-lock.json
poetry install
@touch .venv
.PHONY: npm-update
npm-update: node-check | node_modules
npx updates -cu
.PHONY: update-js
update-js: node-check | node_modules
npx updates -u -f package.json
rm -rf node_modules package-lock.json
npm install --package-lock
@touch node_modules
.PHONY: update-py
update-py: node-check | node_modules
npx updates -u -f pyproject.toml
rm -rf .venv poetry.lock
poetry install
@touch .venv
.PHONY: fomantic
fomantic:
rm -rf $(FOMANTIC_WORK_DIR)/build

assets/emoji.json generated

File diff suppressed because one or more lines are too long

@ -25,7 +25,7 @@ import (
var optionLogVerbose bool
func logVerbose(msg string, args ...interface{}) {
func logVerbose(msg string, args ...any) {
if optionLogVerbose {
log.Printf(msg, args...)
}

@ -25,7 +25,7 @@ import (
const (
gemojiURL = "https://raw.githubusercontent.com/github/gemoji/master/db/emoji.json"
maxUnicodeVersion = 14
maxUnicodeVersion = 15
)
var flagOut = flag.String("o", "modules/emoji/emoji_data.go", "out")

@ -63,7 +63,7 @@ Outputs to 'cert.pem' and 'key.pem' and will overwrite existing files.`,
},
}
func publicKey(priv interface{}) interface{} {
func publicKey(priv any) any {
switch k := priv.(type) {
case *rsa.PrivateKey:
return &k.PublicKey
@ -74,7 +74,7 @@ func publicKey(priv interface{}) interface{} {
}
}
func pemBlockForKey(priv interface{}) *pem.Block {
func pemBlockForKey(priv any) *pem.Block {
switch k := priv.(type) {
case *rsa.PrivateKey:
return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}
@ -94,7 +94,7 @@ func runCert(c *cli.Context) error {
return err
}
var priv interface{}
var priv any
var err error
switch c.String("ecdsa-curve") {
case "":

@ -106,5 +106,21 @@ func setupConsoleLogger(level log.Level, colorize bool, out io.Writer) {
WriterOption: log.WriterConsoleOption{Stderr: out == os.Stderr},
}
writer := log.NewEventWriterConsole("console-default", writeMode)
log.GetManager().GetLogger(log.DEFAULT).RemoveAllWriters().AddWriters(writer)
log.GetManager().GetLogger(log.DEFAULT).ReplaceAllWriters(writer)
}
// PrepareConsoleLoggerLevel by default uses INFO level for the console logger, but some sub-commands (for the git/ssh protocol) shouldn't output any log to stdout.
// Any log that appears in the git stdout pipe will break the git protocol, e.g. the client can't push and hangs forever.
func PrepareConsoleLoggerLevel(defaultLevel log.Level) func(*cli.Context) error {
return func(c *cli.Context) error {
level := defaultLevel
if c.Bool("quiet") || c.GlobalBoolT("quiet") {
level = log.FATAL
}
if c.Bool("debug") || c.GlobalBool("debug") || c.Bool("verbose") || c.GlobalBool("verbose") {
level = log.TRACE
}
log.SetConsoleLogger(log.DEFAULT, "console-default", level)
return nil
}
}

@ -151,7 +151,7 @@ func setupDoctorDefaultLogger(ctx *cli.Context, colorize bool) {
log.FallbackErrorf("unable to create file log writer: %v", err)
return
}
log.GetManager().GetLogger(log.DEFAULT).RemoveAllWriters().AddWriters(writer)
log.GetManager().GetLogger(log.DEFAULT).ReplaceAllWriters(writer)
}
}

@ -161,7 +161,7 @@ It can be used for backup and capture Gitea server image to send to maintainer`,
},
}
func fatal(format string, args ...interface{}) {
func fatal(format string, args ...any) {
fmt.Fprintf(os.Stderr, format+"\n", args...)
log.Fatal(format, args...)
}
@ -236,7 +236,7 @@ func runDump(ctx *cli.Context) error {
return err
}
var iface interface{}
var iface any
if fileName == "-" {
iface, err = archiver.ByExtension(fmt.Sprintf(".%s", outType))
} else {

@ -22,9 +22,9 @@ import (
"github.com/urfave/cli"
)
// Cmdembedded represents the available extract sub-command.
// CmdEmbedded represents the available extract sub-command.
var (
Cmdembedded = cli.Command{
CmdEmbedded = cli.Command{
Name: "embedded",
Usage: "Extract embedded resources",
Description: "A command for extracting embedded resources, like templates and images",

@ -15,6 +15,7 @@ import (
"time"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/private"
repo_module "code.gitea.io/gitea/modules/repository"
"code.gitea.io/gitea/modules/setting"
@ -32,6 +33,7 @@ var (
Name: "hook",
Usage: "Delegate commands to corresponding Git hooks",
Description: "This should only be called by Git",
Before: PrepareConsoleLoggerLevel(log.FATAL),
Subcommands: []cli.Command{
subcmdHookPreReceive,
subcmdHookUpdate,

@ -8,6 +8,7 @@ import (
"fmt"
"strings"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/private"
"github.com/urfave/cli"
@ -17,6 +18,7 @@ import (
var CmdKeys = cli.Command{
Name: "keys",
Usage: "This command queries the Gitea database to get the authorized command for a given ssh key fingerprint",
Before: PrepareConsoleLoggerLevel(log.FATAL),
Action: runKeys,
Flags: []cli.Flag{
cli.StringFlag{

@ -178,7 +178,7 @@ func runAddConnLogger(c *cli.Context) error {
defer cancel()
setup(ctx, c.Bool("debug"))
vals := map[string]interface{}{}
vals := map[string]any{}
mode := "conn"
vals["net"] = "tcp"
if c.IsSet("protocol") {
@ -208,7 +208,7 @@ func runAddFileLogger(c *cli.Context) error {
defer cancel()
setup(ctx, c.Bool("debug"))
vals := map[string]interface{}{}
vals := map[string]any{}
mode := "file"
if c.IsSet("filename") {
vals["filename"] = c.String("filename")
@ -236,7 +236,7 @@ func runAddFileLogger(c *cli.Context) error {
return commonAddLogger(c, mode, vals)
}
func commonAddLogger(c *cli.Context, mode string, vals map[string]interface{}) error {
func commonAddLogger(c *cli.Context, mode string, vals map[string]any) error {
if len(c.String("level")) > 0 {
vals["level"] = log.LevelFromString(c.String("level")).String()
}

@ -44,6 +44,7 @@ var CmdServ = cli.Command{
Name: "serv",
Usage: "This command should only be called by SSH shell",
Description: "Serv provides access auth for repositories",
Before: PrepareConsoleLoggerLevel(log.FATAL),
Action: runServ,
Flags: []cli.Flag{
cli.BoolFlag{
@ -94,7 +95,7 @@ var (
// fail prints message to stdout, it's mainly used for git serv and git hook commands.
// The output will be passed to git client and shown to user.
func fail(ctx context.Context, userMessage, logMsgFmt string, args ...interface{}) error {
func fail(ctx context.Context, userMessage, logMsgFmt string, args ...any) error {
if userMessage == "" {
userMessage = "Internal Server Error (no specific error)"
}

@ -35,6 +35,7 @@ var CmdWeb = cli.Command{
Usage: "Start Gitea web server",
Description: `Gitea web server is the only thing you need to run,
and it takes care of all the other things for you`,
Before: PrepareConsoleLoggerLevel(log.INFO),
Action: runWeb,
Flags: []cli.Flag{
cli.StringFlag{
@ -206,11 +207,6 @@ func servePprof() {
}
func runWeb(ctx *cli.Context) error {
if ctx.Bool("verbose") {
setupConsoleLogger(log.TRACE, log.CanColorStdout, os.Stdout)
} else if ctx.Bool("quiet") {
setupConsoleLogger(log.FATAL, log.CanColorStdout, os.Stdout)
}
defer func() {
if panicked := recover(); panicked != nil {
log.Fatal("PANIC: %v\n%s", panicked, log.Stack(2))

@ -5,7 +5,6 @@ package main
import (
"os"
"strings"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
@ -13,9 +12,6 @@ import (
"github.com/urfave/cli"
)
// EnvironmentPrefix environment variables prefixed with this represent ini values to write
const EnvironmentPrefix = "GITEA"
func main() {
app := cli.NewApp()
app.Name = "environment-to-ini"
@ -70,15 +66,6 @@ func main() {
Value: "",
Usage: "Destination file to write to",
},
cli.BoolFlag{
Name: "clear",
Usage: "Clears the matched variables from the environment",
},
cli.StringFlag{
Name: "prefix, p",
Value: EnvironmentPrefix,
Usage: "Environment prefix to look for - will be suffixed by __ (2 underscores)",
},
}
app.Action = runEnvironmentToIni
err := app.Run(os.Args)
@ -99,9 +86,7 @@ func runEnvironmentToIni(c *cli.Context) error {
log.Fatal("Failed to load custom conf '%s': %v", setting.CustomConf, err)
}
prefixGitea := c.String("prefix") + "__"
suffixFile := "__FILE"
changed := setting.EnvironmentToConfig(cfg, prefixGitea, suffixFile, os.Environ())
changed := setting.EnvironmentToConfig(cfg, os.Environ())
// try to save the config file
destination := c.String("out")
@ -116,19 +101,5 @@ func runEnvironmentToIni(c *cli.Context) error {
}
}
// clear Gitea's specific environment variables if requested
if c.Bool("clear") {
for _, kv := range os.Environ() {
idx := strings.IndexByte(kv, '=')
if idx < 0 {
continue
}
eKey := kv[:idx]
if strings.HasPrefix(eKey, prefixGitea) {
_ = os.Unsetenv(eKey)
}
}
}
return nil
}

@ -193,8 +193,8 @@ RUN_USER = ; git
;; Use `ssh-keygen` to parse public SSH keys. The value is passed to the shell. By default, Gitea does the parsing itself.
;SSH_KEYGEN_PATH =
;;
;; Enable SSH Authorized Key Backup when rewriting all keys, default is true
;SSH_AUTHORIZED_KEYS_BACKUP = true
;; Enable SSH Authorized Key Backup when rewriting all keys, default is false
;SSH_AUTHORIZED_KEYS_BACKUP = false
;;
;; Determines which principals to allow
;; - empty: if SSH_TRUSTED_USER_CA_KEYS is empty this will default to off, otherwise will default to email, username.
@ -303,7 +303,10 @@ RUN_USER = ; git
;;
;;
;; LFS authentication secret, change this yourself
LFS_JWT_SECRET =
;LFS_JWT_SECRET =
;;
;; Alternative location to specify LFS authentication secret. You cannot specify both this and LFS_JWT_SECRET, and must pick one
;LFS_JWT_SECRET_URI = file:/etc/gitea/lfs_jwt_secret
;;
;; LFS authentication validity period (in time.Duration), pushes taking longer than this may fail.
;LFS_HTTP_AUTH_EXPIRY = 24h
@ -527,6 +530,9 @@ ENABLE = true
;; This setting is only needed if JWT_SIGNING_ALGORITHM is set to HS256, HS384 or HS512.
;JWT_SECRET =
;;
;; Alternative location to specify OAuth2 authentication secret. You cannot specify both this and JWT_SECRET, and must pick one
;JWT_SECRET_URI = file:/etc/gitea/oauth2_jwt_secret
;;
;; Lifetime of an OAuth2 access token in seconds
;ACCESS_TOKEN_EXPIRATION_TIME = 3600
;;
@ -2541,8 +2547,8 @@ LEVEL = Info
;; Enable/Disable actions capabilities
;ENABLED = false
;;
;; Default address to get action plugins, e.g. the default value means downloading from "https://gitea.com/actions/checkout" for "uses: actions/checkout@v3"
;DEFAULT_ACTIONS_URL = https://gitea.com
;; Default platform to get action plugins, `github` for `https://github.com`, `self` for the current Gitea instance.
;DEFAULT_ACTIONS_URL = github
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

@ -2,7 +2,15 @@
if [ ! -d /data/git/.ssh ]; then
mkdir -p /data/git/.ssh
chmod 700 /data/git/.ssh
fi
# Set the correct permissions on the .ssh directory and authorized_keys file,
# or sshd will refuse to use them, leading to clone/push/pull failures.
# This can happen when users have copied their data to a new volume and changed the file permissions by accident,
# and it is very hard to troubleshoot unless they know how to check the logs of sshd, which is started by s6.
chmod 700 /data/git/.ssh
if [ -f /data/git/.ssh/authorized_keys ]; then
chmod 600 /data/git/.ssh/authorized_keys
fi
if [ ! -f /data/git/.ssh/environment ]; then

@ -336,7 +336,7 @@ The following configuration set `Content-Type: application/vnd.android.package-a
- `SSH_LISTEN_PORT`: **%(SSH\_PORT)s**: Port for the built-in SSH server.
- `SSH_ROOT_PATH`: **~/.ssh**: Root path of SSH directory.
- `SSH_CREATE_AUTHORIZED_KEYS_FILE`: **true**: Gitea will create an authorized_keys file by default when it is not using the internal ssh server. If you intend to use the AuthorizedKeysCommand functionality then you should turn this off.
- `SSH_AUTHORIZED_KEYS_BACKUP`: **true**: Enable SSH Authorized Key Backup when rewriting all keys, default is true.
- `SSH_AUTHORIZED_KEYS_BACKUP`: **false**: Enable SSH Authorized Key Backup when rewriting all keys, default is false.
- `SSH_TRUSTED_USER_CA_KEYS`: **\<empty\>**: Specifies the public keys of certificate authorities that are trusted to sign user certificates for authentication. Multiple keys should be comma separated. E.g.`ssh-<algorithm> <key>` or `ssh-<algorithm> <key1>, ssh-<algorithm> <key2>`. For more information see `TrustedUserCAKeys` in the sshd config man pages. When empty no file will be created and `SSH_AUTHORIZED_PRINCIPALS_ALLOW` will default to `off`.
- `SSH_TRUSTED_USER_CA_KEYS_FILENAME`: **`RUN_USER`/.ssh/gitea-trusted-user-ca-keys.pem**: Absolute path of the `TrustedUserCaKeys` file Gitea will manage. If you're running your own ssh server and you want to use the Gitea managed file you'll also need to modify your sshd_config to point to this file. The official docker image will automatically work without further configuration.
- `SSH_AUTHORIZED_PRINCIPALS_ALLOW`: **off** or **username, email**: \[off, username, email, anything\]: Specify the principals values that users are allowed to use as principal. When set to `anything` no checks are done on the principal string. When set to `off` authorized principal are not allowed to be set.
@ -368,6 +368,7 @@ The following configuration set `Content-Type: application/vnd.android.package-a
- `LFS_START_SERVER`: **false**: Enables Git LFS support.
- `LFS_CONTENT_PATH`: **%(APP_DATA_PATH)s/lfs**: Default LFS content path. (if it is on local storage.) **DEPRECATED** use settings in `[lfs]`.
- `LFS_JWT_SECRET`: **\<empty\>**: LFS authentication secret, change this to a unique string.
- `LFS_JWT_SECRET_URI`: **\<empty\>**: Instead of defining LFS_JWT_SECRET in the configuration, this configuration option can be used to give Gitea a path to a file that contains the secret (example value: `file:/etc/gitea/lfs_jwt_secret`)
- `LFS_HTTP_AUTH_EXPIRY`: **24h**: LFS authentication validity period in time.Duration, pushes taking longer than this may fail.
- `LFS_MAX_FILE_SIZE`: **0**: Maximum allowed LFS file size in bytes (Set to 0 for no limit).
- `LFS_LOCKS_PAGING_NUM`: **50**: Maximum number of LFS Locks returned per page.
@ -1097,6 +1098,7 @@ This section only does "set" config, a removed config key from this section won'
- `INVALIDATE_REFRESH_TOKENS`: **false**: Check if refresh token has already been used
- `JWT_SIGNING_ALGORITHM`: **RS256**: Algorithm used to sign OAuth2 tokens. Valid values: \[`HS256`, `HS384`, `HS512`, `RS256`, `RS384`, `RS512`, `ES256`, `ES384`, `ES512`\]
- `JWT_SECRET`: **\<empty\>**: OAuth2 authentication secret for access and refresh tokens, change this to a unique string. This setting is only needed if `JWT_SIGNING_ALGORITHM` is set to `HS256`, `HS384` or `HS512`.
- `JWT_SECRET_URI`: **\<empty\>**: Instead of defining JWT_SECRET in the configuration, this configuration option can be used to give Gitea a path to a file that contains the secret (example value: `file:/etc/gitea/oauth2_jwt_secret`)
- `JWT_SIGNING_PRIVATE_KEY_FILE`: **jwt/private.pem**: Private key file path used to sign OAuth2 tokens. The path is relative to `APP_DATA_PATH`. This setting is only needed if `JWT_SIGNING_ALGORITHM` is set to `RS256`, `RS384`, `RS512`, `ES256`, `ES384` or `ES512`. The file must contain a RSA or ECDSA private key in the PKCS8 format. If no key exists a 4096 bit key will be created for you.
- `MAX_TOKEN_LENGTH`: **32767**: Maximum length of token/cookie to accept from OAuth2 provider
@ -1376,39 +1378,22 @@ PROXY_HOSTS = *.github.com
## Actions (`actions`)
- `ENABLED`: **false**: Enable/Disable actions capabilities
- `DEFAULT_ACTIONS_URL`: **https://gitea.com**: Default address to get action plugins, e.g. the default value means downloading from "<https://gitea.com/actions/checkout>" for "uses: actions/checkout@v3"
- `DEFAULT_ACTIONS_URL`: **github**: Default platform to get action plugins, `github` for `https://github.com`, `self` for the current Gitea instance.
- `STORAGE_TYPE`: **local**: Storage type for actions logs, `local` for local disk or `minio` for s3 compatible object storage service, default is `local` or other name defined with `[storage.xxx]`
- `MINIO_BASE_PATH`: **actions_log/**: Minio base path on the bucket only available when STORAGE_TYPE is `minio`
`DEFAULT_ACTIONS_URL` indicates where should we find the relative path action plugin. i.e. when use an action in a workflow file like
`DEFAULT_ACTIONS_URL` indicates where the Gitea Actions runners should find the actions with relative path.
For example, `uses: actions/checkout@v3` means `https://github.com/actions/checkout@v3` since the value of `DEFAULT_ACTIONS_URL` is `github`.
And it can be changed to `self` to make it `root_url_of_your_gitea/actions/checkout@v3`.
```yaml
name: versions
on:
push:
branches:
- main
- releases/*
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
```
Please note that using `self` is not recommended for most cases, as it could make names globally ambiguous.
Additionally, it requires you to mirror all the actions you need to your Gitea instance, which may not be worth it.
Therefore, please use `self` only if you understand what you are doing.
Now we need to know how to get actions/checkout, this configuration is the default git server to get it. That means we will get the repository via git clone ${DEFAULT_ACTIONS_URL}/actions/checkout and fetch tag v3.
To help people who don't want to mirror these actions in their git instances, the default value is https://gitea.com
To help people run actions totally in their network, they can change the value and copy all necessary action repositories into their git server.
Of course we should support the form in future PRs like
```yaml
steps:
- uses: gitea.com/actions/checkout@v3
```
although GitHub doesn't support this form.
In earlier versions (<= 1.19), `DEFAULT_ACTIONS_URL` could be set to any custom URL like `https://gitea.com` or `http://your-git-server,https://gitea.com`, and the default value was `https://gitea.com`.
However, later updates removed those options, and now the only options are `github` and `self`, with the default value being `github`.
That said, if you want to use actions from another git server, you can use a complete URL in the `uses` field; this is supported by Gitea (but not GitHub).
For example, `uses: https://gitea.com/actions/checkout@v3` or `uses: http://your-git-server/actions/checkout@v3`.
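A minimal workflow sketch of that form (the workflow and job names are illustrative; only the fully-qualified `uses` URL comes from the paragraph above):
```yaml
name: checkout-from-url   # illustrative name
on: [push]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      # A complete URL in `uses` fetches the action from that git server,
      # independent of the configured DEFAULT_ACTIONS_URL platform.
      - uses: https://gitea.com/actions/checkout@v3
```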
## Other (`other`)

@ -119,7 +119,7 @@ services:
- /etc/localtime:/etc/localtime:ro
ports:
- "3000:3000"
- "222:22"
- "2222:2222"
+ depends_on:
+ - db
+
@ -288,7 +288,7 @@ docker-compose up -d
In addition to the environment variables above, any settings in `app.ini` can be set
or overridden with an environment variable of the form: `GITEA__SECTION_NAME__KEY_NAME`.
These settings are applied each time the docker container starts.
These settings are applied each time the docker container starts, and won't be passed into Gitea's sub-processes.
Full information [here](https://github.com/go-gitea/gitea/tree/main/contrib/environment-to-ini).
These environment variables can be passed to the docker container in `docker-compose.yml`.
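A minimal sketch of what this can look like in `docker-compose.yml`; the section/key names below are only examples of the `GITEA__SECTION_NAME__KEY_NAME` pattern, not required settings:
```yaml
version: "3"

services:
  server:
    image: gitea/gitea:latest
    environment:
      # GITEA__SECTION_NAME__KEY_NAME maps to KEY_NAME in the [section_name] block of app.ini;
      # the two keys below are illustrative examples.
      - GITEA__server__LFS_START_SERVER=true
      - GITEA__service__DISABLE_REGISTRATION=true
```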

@ -289,7 +289,7 @@ docker-compose up -d
In addition to the environment variables above, any settings in `app.ini` can be set
or overridden with an environment variable of the form: `GITEA__SECTION_NAME__KEY_NAME`.
These settings are applied each time the docker container starts.
These settings are applied each time the docker container starts, and won't be passed into Gitea's sub-processes.
Full information [here](https://github.com/go-gitea/gitea/tree/master/contrib/environment-to-ini).
These environment variables can be passed to the docker container in `docker-compose.yml`.

@ -164,3 +164,23 @@ Although we would like to provide more options, our limited manpower means that
However, both Gitea and act runner are completely open source, so anyone can create a new/better implementation.
We support your choice, no matter how you decide.
In case you fork act runner to create your own version: Please contribute the changes back if you can and if you think your changes will help others as well.
## What workflow trigger events does Gitea support?
All events listed in this table are supported events and are compatible with GitHub.
For events supported only by GitHub, see GitHub's [documentation](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows).
| trigger event | activity types |
|-----------------------------|--------------------------------------------------------------------------------------------------------------------------|
| create | not applicable |
| delete | not applicable |
| fork | not applicable |
| gollum | not applicable |
| push | not applicable |
| issues | `opened`, `edited`, `closed`, `reopened`, `assigned`, `unassigned`, `milestoned`, `demilestoned`, `labeled`, `unlabeled` |
| issue_comment | `created`, `edited`, `deleted` |
| pull_request | `opened`, `edited`, `closed`, `reopened`, `assigned`, `unassigned`, `synchronize`, `labeled`, `unlabeled` |
| pull_request_review | `submitted`, `edited` |
| pull_request_review_comment | `created`, `edited` |
| release | `published`, `edited` |
| registry_package | `published` |
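A minimal workflow sketch combining a few of these events and activity types (the workflow and job names are illustrative; the events and types are taken from the table above):
```yaml
name: issue-and-pr-events   # illustrative name
on:
  issues:
    types: [opened, reopened, labeled]
  pull_request:
    types: [opened, synchronize]

jobs:
  log-event:
    runs-on: ubuntu-latest
    steps:
      - run: echo "triggered by an issues or pull_request event"
```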

@ -164,3 +164,23 @@ defaults:
However, no matter what you decide, Gitea and act runner are completely open source, so anyone can create a new/better implementation.
We support your choice, no matter how you decide.
In case you fork act runner to create your own version: please contribute the changes back if you can and if you think your changes will help others as well.
## What workflow trigger events does Gitea support?
All events listed in this table are supported and are compatible with GitHub.
For events supported only by GitHub, see GitHub's [documentation](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows).
| trigger event | activity types |
|-----------------------------|--------------------------------------------------------------------------------------------------------------------------|
| create | not applicable |
| delete | not applicable |
| fork | not applicable |
| gollum | not applicable |
| push | not applicable |
| issues | `opened`, `edited`, `closed`, `reopened`, `assigned`, `unassigned`, `milestoned`, `demilestoned`, `labeled`, `unlabeled` |
| issue_comment | `created`, `edited`, `deleted` |
| pull_request | `opened`, `edited`, `closed`, `reopened`, `assigned`, `unassigned`, `synchronize`, `labeled`, `unlabeled` |
| pull_request_review | `submitted`, `edited` |
| pull_request_review_comment | `created`, `edited` |
| release | `published`, `edited` |
| registry_package | `published` |

main.go

@ -87,28 +87,36 @@ func main() {
app.Description = `By default, Gitea will start serving using the web-server with no argument, which can alternatively be run by running the subcommand "web".`
app.Version = Version + formatBuiltWith()
app.EnableBashCompletion = true
app.Commands = []cli.Command{
// these sub-commands need to use config file
subCmdWithIni := []cli.Command{
cmd.CmdWeb,
cmd.CmdServ,
cmd.CmdHook,
cmd.CmdDump,
cmd.CmdCert,
cmd.CmdAdmin,
cmd.CmdGenerate,
cmd.CmdMigrate,
cmd.CmdKeys,
cmd.CmdConvert,
cmd.CmdDoctor,
cmd.CmdManager,
cmd.Cmdembedded,
cmd.CmdEmbedded,
cmd.CmdMigrateStorage,
cmd.CmdDocs,
cmd.CmdDumpRepository,
cmd.CmdRestoreRepository,
cmd.CmdActions,
cmdHelp, // TODO: the "help" sub-command was used to show the more information for "work path" and "custom config", in the future, it should avoid doing so
}
// these sub-commands do not need the config file, and they do not depend on any path or environment variable.
subCmdStandalone := []cli.Command{
cmd.CmdCert,
cmd.CmdGenerate,
cmd.CmdDocs,
}
// default configuration flags
// shared configuration flags; they apply both globally and to each sub-command at the same time
// e.g. a command like "./gitea --config /tmp/app.ini web --config /tmp/app.ini" is valid, although it is discouraged
// keep in mind that short flags like "-C", "-c" and "-w" are globally polluted; they can't be used for sub-commands anymore.
globalFlags := []cli.Flag{
cli.HelpFlag,
cli.StringFlag{
@ -128,13 +136,15 @@ func main() {
// Set the default to be equivalent to cmdWeb and add the default flags
app.Flags = append(app.Flags, globalFlags...)
app.Flags = append(app.Flags, cmd.CmdWeb.Flags...)
app.Flags = append(app.Flags, cmd.CmdWeb.Flags...) // TODO: the web flags polluted the global flags, they are not really global flags
app.Action = prepareWorkPathAndCustomConf(cmd.CmdWeb.Action)
app.HideHelp = true // use our own help action to show helps (with more information like default config)
app.Commands = append(app.Commands, cmdHelp)
for i := range app.Commands {
prepareSubcommands(&app.Commands[i], globalFlags)
app.Before = cmd.PrepareConsoleLoggerLevel(log.INFO)
for i := range subCmdWithIni {
prepareSubcommands(&subCmdWithIni[i], globalFlags)
}
app.Commands = append(app.Commands, subCmdWithIni...)
app.Commands = append(app.Commands, subCmdStandalone...)
err := app.Run(os.Args)
if err != nil {

@ -344,6 +344,9 @@ func UpdateTask(ctx context.Context, task *ActionTask, cols ...string) error {
return err
}
// UpdateTaskByState updates the task by the state.
// It will always update the task if the state is not final, even if there is no change.
// So it will update ActionTask.Updated to avoid the task being judged as a zombie task.
func UpdateTaskByState(ctx context.Context, state *runnerv1.TaskState) (*ActionTask, error) {
stepStates := map[int64]*runnerv1.StepState{}
for _, v := range state.Steps {
@ -384,6 +387,12 @@ func UpdateTaskByState(ctx context.Context, state *runnerv1.TaskState) (*ActionT
}, nil); err != nil {
return nil, err
}
} else {
// Force update ActionTask.Updated to avoid the task being judged as a zombie task
task.Updated = timeutil.TimeStampNow()
if err := UpdateTask(ctx, task, "updated"); err != nil {
return nil, err
}
}
if err := task.LoadAttributes(ctx); err != nil {

@ -343,7 +343,7 @@ func getIssueNotification(ctx context.Context, userID, issueID int64) (*Notifica
// NotificationsForUser returns notifications for a given user and status
func NotificationsForUser(ctx context.Context, user *user_model.User, statuses []NotificationStatus, page, perPage int) (notifications NotificationList, err error) {
if len(statuses) == 0 {
return
return nil, nil
}
sess := db.GetEngine(ctx).
@ -372,16 +372,16 @@ func CountUnread(ctx context.Context, userID int64) int64 {
// LoadAttributes load Repo Issue User and Comment if not loaded
func (n *Notification) LoadAttributes(ctx context.Context) (err error) {
if err = n.loadRepo(ctx); err != nil {
return
return err
}
if err = n.loadIssue(ctx); err != nil {
return
return err
}
if err = n.loadUser(ctx); err != nil {
return
return err
}
if err = n.loadComment(ctx); err != nil {
return
return err
}
return err
}

@ -44,7 +44,7 @@ func init() {
// TranslatableMessage represents JSON struct that can be translated with a Locale
type TranslatableMessage struct {
Format string
Args []interface{} `json:"omitempty"`
Args []any `json:"omitempty"`
}
// LoadRepo loads repository of the task

@ -455,9 +455,9 @@ func hashAndVerifyForKeyID(sig *packet.Signature, payload string, committer *use
// CalculateTrustStatus will calculate the TrustStatus for a commit verification within a repository
// There are several trust models in Gitea
func CalculateTrustStatus(verification *CommitVerification, repoTrustModel repo_model.TrustModelType, isOwnerMemberCollaborator func(*user_model.User) (bool, error), keyMap *map[string]bool) (err error) {
func CalculateTrustStatus(verification *CommitVerification, repoTrustModel repo_model.TrustModelType, isOwnerMemberCollaborator func(*user_model.User) (bool, error), keyMap *map[string]bool) error {
if !verification.Verified {
return
return nil
}
// In the Committer trust model a signature is trusted if it matches the committer
@ -475,7 +475,7 @@ func CalculateTrustStatus(verification *CommitVerification, repoTrustModel repo_
verification.SigningUser.Email == verification.CommittingUser.Email) {
verification.TrustStatus = "trusted"
}
return
return nil
}
// Now we drop to the more nuanced trust models...
@ -490,10 +490,11 @@ func CalculateTrustStatus(verification *CommitVerification, repoTrustModel repo_
verification.SigningUser.Email != verification.CommittingUser.Email) {
verification.TrustStatus = "untrusted"
}
return
return nil
}
// Check we actually have a GPG SigningKey
var err error
if verification.SigningKey != nil {
var isMember bool
if keyMap != nil {

@ -111,7 +111,7 @@ func populateHash(hashFunc crypto.Hash, msg []byte) (hash.Hash, error) {
func readArmoredSign(r io.Reader) (body io.Reader, err error) {
block, err := armor.Decode(r)
if err != nil {
return
return nil, err
}
if block.Type != openpgp.SignatureType {
return nil, fmt.Errorf("expected '" + openpgp.SignatureType + "', got: " + block.Type)

@ -47,7 +47,7 @@ var sshOpLocker sync.Mutex
// AuthorizedStringForKey creates the authorized keys string appropriate for the provided key
func AuthorizedStringForKey(key *PublicKey) string {
sb := &strings.Builder{}
_ = setting.SSH.AuthorizedKeysCommandTemplateTemplate.Execute(sb, map[string]interface{}{
_ = setting.SSH.AuthorizedKeysCommandTemplateTemplate.Execute(sb, map[string]any{
"AppPath": util.ShellEscape(setting.AppPath),
"AppWorkPath": util.ShellEscape(setting.AppWorkPath),
"CustomConf": util.ShellEscape(setting.CustomConf),
@ -175,7 +175,7 @@ func RewriteAllPublicKeys() error {
// RegeneratePublicKeys regenerates the authorized_keys file
func RegeneratePublicKeys(ctx context.Context, t io.StringWriter) error {
if err := db.GetEngine(ctx).Where("type != ?", KeyTypePrincipal).Iterate(new(PublicKey), func(idx int, bean interface{}) (err error) {
if err := db.GetEngine(ctx).Where("type != ?", KeyTypePrincipal).Iterate(new(PublicKey), func(idx int, bean any) (err error) {
_, err = t.WriteString((bean.(*PublicKey)).AuthorizedString())
return err
}); err != nil {

@ -97,7 +97,7 @@ func RewriteAllPrincipalKeys(ctx context.Context) error {
}
func regeneratePrincipalKeys(ctx context.Context, t io.StringWriter) error {
if err := db.GetEngine(ctx).Where("type = ?", KeyTypePrincipal).Iterate(new(PublicKey), func(idx int, bean interface{}) (err error) {
if err := db.GetEngine(ctx).Where("type = ?", KeyTypePrincipal).Iterate(new(PublicKey), func(idx int, bean any) (err error) {
_, err = t.WriteString((bean.(*PublicKey)).AuthorizedString())
return err
}); err != nil {

@ -306,9 +306,10 @@ func (code *OAuth2AuthorizationCode) TableName() string {
}
// GenerateRedirectURI generates a redirect URI for a successful authorization request. State will be used if not empty.
func (code *OAuth2AuthorizationCode) GenerateRedirectURI(state string) (redirect *url.URL, err error) {
if redirect, err = url.Parse(code.RedirectURI); err != nil {
return
func (code *OAuth2AuthorizationCode) GenerateRedirectURI(state string) (*url.URL, error) {
redirect, err := url.Parse(code.RedirectURI)
if err != nil {
return nil, err
}
q := redirect.Query()
if state != "" {

@ -52,7 +52,7 @@ func (ctx *Context) Engine() Engine {
}
// Value shadows Value for context.Context but allows us to get ourselves and an Engined object
func (ctx *Context) Value(key interface{}) interface{} {
func (ctx *Context) Value(key any) any {
if key == enginedContextKey {
return ctx
}
@ -163,28 +163,28 @@ func txWithNoCheck(parentCtx context.Context, f func(ctx context.Context) error)
}
// Insert inserts records into database
func Insert(ctx context.Context, beans ...interface{}) error {
func Insert(ctx context.Context, beans ...any) error {
_, err := GetEngine(ctx).Insert(beans...)
return err
}
// Exec executes a sql with args
func Exec(ctx context.Context, sqlAndArgs ...interface{}) (sql.Result, error) {
func Exec(ctx context.Context, sqlAndArgs ...any) (sql.Result, error) {
return GetEngine(ctx).Exec(sqlAndArgs...)
}
// GetByBean filled empty fields of the bean according non-empty fields to query in database.
func GetByBean(ctx context.Context, bean interface{}) (bool, error) {
func GetByBean(ctx context.Context, bean any) (bool, error) {
return GetEngine(ctx).Get(bean)
}
// DeleteByBean deletes all records according non-empty fields of the bean as conditions.
func DeleteByBean(ctx context.Context, bean interface{}) (int64, error) {
func DeleteByBean(ctx context.Context, bean any) (int64, error) {
return GetEngine(ctx).Delete(bean)
}
// DeleteByID deletes the given bean with the given ID
func DeleteByID(ctx context.Context, id int64, bean interface{}) (int64, error) {
func DeleteByID(ctx context.Context, id int64, bean any) (int64, error) {
return GetEngine(ctx).ID(id).NoAutoTime().Delete(bean)
}
@ -203,13 +203,13 @@ func FindIDs(ctx context.Context, tableName, idCol string, cond builder.Cond) ([
// DecrByIDs decreases the given column for entities of the "bean" type with one of the given ids by one
// Timestamps of the entities won't be updated
func DecrByIDs(ctx context.Context, ids []int64, decrCol string, bean interface{}) error {
func DecrByIDs(ctx context.Context, ids []int64, decrCol string, bean any) error {
_, err := GetEngine(ctx).Decr(decrCol).In("id", ids).NoAutoCondition().NoAutoTime().Update(bean)
return err
}
// DeleteBeans deletes all given beans, beans must contain delete conditions.
func DeleteBeans(ctx context.Context, beans ...interface{}) (err error) {
func DeleteBeans(ctx context.Context, beans ...any) (err error) {
e := GetEngine(ctx)
for i := range beans {
if _, err = e.Delete(beans[i]); err != nil {
@ -220,7 +220,7 @@ func DeleteBeans(ctx context.Context, beans ...interface{}) (err error) {
}
// TruncateBeans deletes all given beans, beans may contain delete conditions.
func TruncateBeans(ctx context.Context, beans ...interface{}) (err error) {
func TruncateBeans(ctx context.Context, beans ...any) (err error) {
e := GetEngine(ctx)
for i := range beans {
if _, err = e.Truncate(beans[i]); err != nil {
@ -231,12 +231,12 @@ func TruncateBeans(ctx context.Context, beans ...interface{}) (err error) {
}
// CountByBean counts the number of database records according non-empty fields of the bean as conditions.
func CountByBean(ctx context.Context, bean interface{}) (int64, error) {
func CountByBean(ctx context.Context, bean any) (int64, error) {
return GetEngine(ctx).Count(bean)
}
// TableName returns the table name according a bean object
func TableName(bean interface{}) string {
func TableName(bean any) string {
return x.TableName(bean)
}

@ -25,7 +25,7 @@ import (
var (
x *xorm.Engine
tables []interface{}
tables []any
initFuncs []func() error
// HasEngine specifies if we have a xorm.Engine
@ -34,41 +34,41 @@ var (
// Engine represents a xorm engine or session.
type Engine interface {
Table(tableNameOrBean interface{}) *xorm.Session
Count(...interface{}) (int64, error)
Decr(column string, arg ...interface{}) *xorm.Session
Delete(...interface{}) (int64, error)
Truncate(...interface{}) (int64, error)
Exec(...interface{}) (sql.Result, error)
Find(interface{}, ...interface{}) error
Get(beans ...interface{}) (bool, error)
ID(interface{}) *xorm.Session
In(string, ...interface{}) *xorm.Session
Incr(column string, arg ...interface{}) *xorm.Session
Insert(...interface{}) (int64, error)
Iterate(interface{}, xorm.IterFunc) error
Join(joinOperator string, tablename, condition interface{}, args ...interface{}) *xorm.Session
SQL(interface{}, ...interface{}) *xorm.Session
Where(interface{}, ...interface{}) *xorm.Session
Table(tableNameOrBean any) *xorm.Session
Count(...any) (int64, error)
Decr(column string, arg ...any) *xorm.Session
Delete(...any) (int64, error)
Truncate(...any) (int64, error)
Exec(...any) (sql.Result, error)
Find(any, ...any) error
Get(beans ...any) (bool, error)
ID(any) *xorm.Session
In(string, ...any) *xorm.Session
Incr(column string, arg ...any) *xorm.Session
Insert(...any) (int64, error)
Iterate(any, xorm.IterFunc) error
Join(joinOperator string, tablename, condition any, args ...any) *xorm.Session
SQL(any, ...any) *xorm.Session
Where(any, ...any) *xorm.Session
Asc(colNames ...string) *xorm.Session
Desc(colNames ...string) *xorm.Session
Limit(limit int, start ...int) *xorm.Session
NoAutoTime() *xorm.Session
SumInt(bean interface{}, columnName string) (res int64, err error)
Sync2(...interface{}) error
SumInt(bean any, columnName string) (res int64, err error)
Sync2(...any) error
Select(string) *xorm.Session
NotIn(string, ...interface{}) *xorm.Session
OrderBy(interface{}, ...interface{}) *xorm.Session
Exist(...interface{}) (bool, error)
NotIn(string, ...any) *xorm.Session
OrderBy(any, ...any) *xorm.Session
Exist(...any) (bool, error)
Distinct(...string) *xorm.Session
Query(...interface{}) ([]map[string][]byte, error)
Query(...any) ([]map[string][]byte, error)
Cols(...string) *xorm.Session
Context(ctx context.Context) *xorm.Session
Ping() error
}
// TableInfo returns table's information via an object
func TableInfo(v interface{}) (*schemas.Table, error) {
func TableInfo(v any) (*schemas.Table, error) {
return x.TableInfo(v)
}
@ -78,7 +78,7 @@ func DumpTables(tables []*schemas.Table, w io.Writer, tp ...schemas.DBType) erro
}
// RegisterModel registers model, if initfunc provided, it will be invoked after data model sync
func RegisterModel(bean interface{}, initFunc ...func() error) {
func RegisterModel(bean any, initFunc ...func() error) {
tables = append(tables, bean)
if len(initFuncs) > 0 && initFunc[0] != nil {
initFuncs = append(initFuncs, initFunc[0])
@ -209,14 +209,14 @@ func InitEngineWithMigration(ctx context.Context, migrateFunc func(*xorm.Engine)
}
// NamesToBean return a list of beans or an error
func NamesToBean(names ...string) ([]interface{}, error) {
beans := []interface{}{}
func NamesToBean(names ...string) ([]any, error) {
beans := []any{}
if len(names) == 0 {
beans = append(beans, tables...)
return beans, nil
}
// Need to map provided names to beans...
beanMap := make(map[string]interface{})
beanMap := make(map[string]any)
for _, bean := range tables {
beanMap[strings.ToLower(reflect.Indirect(reflect.ValueOf(bean)).Type().Name())] = bean
@ -224,7 +224,7 @@ func NamesToBean(names ...string) ([]interface{}, error) {
beanMap[strings.ToLower(x.TableName(bean, true))] = bean
}
gotBean := make(map[interface{}]bool)
gotBean := make(map[any]bool)
for _, name := range names {
bean, ok := beanMap[strings.ToLower(strings.TrimSpace(name))]
if !ok {
@ -266,7 +266,7 @@ func DumpDatabase(filePath, dbType string) error {
}
// MaxBatchInsertSize returns the table's max batch insert size
func MaxBatchInsertSize(bean interface{}) int {
func MaxBatchInsertSize(bean any) int {
t, err := x.TableInfo(bean)
if err != nil {
return 50
@ -286,7 +286,7 @@ func DeleteAllRecords(tableName string) error {
}
// GetMaxID will return max id of the table
func GetMaxID(beanOrTableName interface{}) (maxID int64, err error) {
func GetMaxID(beanOrTableName any) (maxID int64, err error) {
_, err = x.Select("MAX(id)").Table(beanOrTableName).Get(&maxID)
return maxID, err
}

@ -25,7 +25,7 @@ func (err ErrCancelled) Error() string {
}
// ErrCancelledf returns an ErrCancelled for the provided format and args
func ErrCancelledf(format string, args ...interface{}) error {
func ErrCancelledf(format string, args ...any) error {
return ErrCancelled{
fmt.Sprintf(format, args...),
}

@ -28,47 +28,47 @@ func NewXORMLogger(showSQL bool) xormlog.Logger {
const stackLevel = 8
// Log a message with defined skip and at logging level
func (l *XORMLogBridge) Log(skip int, level log.Level, format string, v ...interface{}) {
func (l *XORMLogBridge) Log(skip int, level log.Level, format string, v ...any) {
l.logger.Log(skip+1, level, format, v...)
}
// Debug show debug log
func (l *XORMLogBridge) Debug(v ...interface{}) {
func (l *XORMLogBridge) Debug(v ...any) {
l.Log(stackLevel, log.DEBUG, "%s", fmt.Sprint(v...))
}
// Debugf show debug log
func (l *XORMLogBridge) Debugf(format string, v ...interface{}) {
func (l *XORMLogBridge) Debugf(format string, v ...any) {
l.Log(stackLevel, log.DEBUG, format, v...)
}
// Error show error log
func (l *XORMLogBridge) Error(v ...interface{}) {
func (l *XORMLogBridge) Error(v ...any) {
l.Log(stackLevel, log.ERROR, "%s", fmt.Sprint(v...))
}
// Errorf show error log
func (l *XORMLogBridge) Errorf(format string, v ...interface{}) {
func (l *XORMLogBridge) Errorf(format string, v ...any) {
l.Log(stackLevel, log.ERROR, format, v...)
}
// Info show information level log
func (l *XORMLogBridge) Info(v ...interface{}) {
func (l *XORMLogBridge) Info(v ...any) {
l.Log(stackLevel, log.INFO, "%s", fmt.Sprint(v...))
}
// Infof show information level log
func (l *XORMLogBridge) Infof(format string, v ...interface{}) {
func (l *XORMLogBridge) Infof(format string, v ...any) {
l.Log(stackLevel, log.INFO, format, v...)
}
// Warn show warning log
func (l *XORMLogBridge) Warn(v ...interface{}) {
func (l *XORMLogBridge) Warn(v ...any) {
l.Log(stackLevel, log.WARN, "%s", fmt.Sprint(v...))
}
// Warnf show warning log
func (l *XORMLogBridge) Warnf(format string, v ...interface{}) {
func (l *XORMLogBridge) Warnf(format string, v ...any) {
l.Log(stackLevel, log.WARN, format, v...)
}

@ -20,6 +20,10 @@ const (
SearchOrderByNewest SearchOrderBy = "created_unix DESC"
SearchOrderBySize SearchOrderBy = "size ASC"
SearchOrderBySizeReverse SearchOrderBy = "size DESC"
SearchOrderByGitSize SearchOrderBy = "git_size ASC"
SearchOrderByGitSizeReverse SearchOrderBy = "git_size DESC"
SearchOrderByLFSSize SearchOrderBy = "lfs_size ASC"
SearchOrderByLFSSizeReverse SearchOrderBy = "lfs_size DESC"
SearchOrderByID SearchOrderBy = "id ASC"
SearchOrderByIDReverse SearchOrderBy = "id DESC"
SearchOrderByStars SearchOrderBy = "num_stars ASC"

@ -7,6 +7,7 @@ import (
"context"
"errors"
"io"
"io/fs"
"os"
"path/filepath"
"strconv"
@ -21,6 +22,7 @@ var defaultFileBlockSize int64 = 32 * 1024
type File interface {
io.ReadWriteCloser
io.Seeker
fs.File
}
type file struct {
@ -193,10 +195,26 @@ func (f *file) Close() error {
return nil
}
func (f *file) Stat() (os.FileInfo, error) {
if f.metaID == 0 {
return nil, os.ErrInvalid
}
fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
if err != nil {
return nil, err
}
return fileMeta, nil
}
func timeToFileTimestamp(t time.Time) int64 {
return t.UnixMicro()
}
func fileTimestampToTime(timestamp int64) time.Time {
return time.UnixMicro(timestamp)
}
func (f *file) loadMetaByPath() (*dbfsMeta, error) {
var fileMeta dbfsMeta
if ok, err := db.GetEngine(f.ctx).Where("full_path = ?", f.fullPath).Get(&fileMeta); err != nil {

@ -5,7 +5,10 @@ package dbfs
import (
"context"
"io/fs"
"os"
"path"
"time"
"code.gitea.io/gitea/models/db"
)
@ -100,3 +103,29 @@ func Remove(ctx context.Context, name string) error {
defer f.Close()
return f.delete()
}
var _ fs.FileInfo = (*dbfsMeta)(nil)
func (m *dbfsMeta) Name() string {
return path.Base(m.FullPath)
}
func (m *dbfsMeta) Size() int64 {
return m.FileSize
}
func (m *dbfsMeta) Mode() fs.FileMode {
return os.ModePerm
}
func (m *dbfsMeta) ModTime() time.Time {
return fileTimestampToTime(m.ModifyTimestamp)
}
func (m *dbfsMeta) IsDir() bool {
return false
}
func (m *dbfsMeta) Sys() any {
return nil
}

@ -111,6 +111,19 @@ func TestDbfsBasic(t *testing.T) {
_, err = OpenFile(db.DefaultContext, "test2.txt", os.O_RDONLY)
assert.Error(t, err)
// test stat
f, err = OpenFile(db.DefaultContext, "test/test.txt", os.O_RDWR|os.O_CREATE)
assert.NoError(t, err)
stat, err := f.Stat()
assert.NoError(t, err)
assert.EqualValues(t, "test.txt", stat.Name())
assert.EqualValues(t, 0, stat.Size())
_, err = f.Write([]byte("0123456789"))
assert.NoError(t, err)
stat, err = f.Stat()
assert.NoError(t, err)
assert.EqualValues(t, 10, stat.Size())
}
func TestDbfsReadWrite(t *testing.T) {

@ -318,90 +318,6 @@ func (err ErrFilePathProtected) Unwrap() error {
return util.ErrPermissionDenied
}
// __________ .__
// \______ \____________ ____ ____ | |__
// | | _/\_ __ \__ \ / \_/ ___\| | \
// | | \ | | \// __ \| | \ \___| Y \
// |______ / |__| (____ /___| /\___ >___| /
// \/ \/ \/ \/ \/
// ErrBranchDoesNotExist represents an error that branch with such name does not exist.
type ErrBranchDoesNotExist struct {
BranchName string
}
// IsErrBranchDoesNotExist checks if an error is an ErrBranchDoesNotExist.
func IsErrBranchDoesNotExist(err error) bool {
_, ok := err.(ErrBranchDoesNotExist)
return ok
}
func (err ErrBranchDoesNotExist) Error() string {
return fmt.Sprintf("branch does not exist [name: %s]", err.BranchName)
}
func (err ErrBranchDoesNotExist) Unwrap() error {
return util.ErrNotExist
}
// ErrBranchAlreadyExists represents an error that branch with such name already exists.
type ErrBranchAlreadyExists struct {
BranchName string
}
// IsErrBranchAlreadyExists checks if an error is an ErrBranchAlreadyExists.
func IsErrBranchAlreadyExists(err error) bool {
_, ok := err.(ErrBranchAlreadyExists)
return ok
}
func (err ErrBranchAlreadyExists) Error() string {
return fmt.Sprintf("branch already exists [name: %s]", err.BranchName)
}
func (err ErrBranchAlreadyExists) Unwrap() error {
return util.ErrAlreadyExist
}
// ErrBranchNameConflict represents an error that branch name conflicts with other branch.
type ErrBranchNameConflict struct {
BranchName string
}
// IsErrBranchNameConflict checks if an error is an ErrBranchNameConflict.
func IsErrBranchNameConflict(err error) bool {
_, ok := err.(ErrBranchNameConflict)
return ok
}
func (err ErrBranchNameConflict) Error() string {
return fmt.Sprintf("branch conflicts with existing branch [name: %s]", err.BranchName)
}
func (err ErrBranchNameConflict) Unwrap() error {
return util.ErrAlreadyExist
}
// ErrBranchesEqual represents an error that branch name conflicts with other branch.
type ErrBranchesEqual struct {
BaseBranchName string
HeadBranchName string
}
// IsErrBranchesEqual checks if an error is an ErrBranchesEqual.
func IsErrBranchesEqual(err error) bool {
_, ok := err.(ErrBranchesEqual)
return ok
}
func (err ErrBranchesEqual) Error() string {
return fmt.Sprintf("branches are equal [head: %sm base: %s]", err.HeadBranchName, err.BaseBranchName)
}
func (err ErrBranchesEqual) Unwrap() error {
return util.ErrInvalidArgument
}
// ErrDisallowedToMerge represents an error that a branch is protected and the current user is not allowed to modify it.
type ErrDisallowedToMerge struct {
Reason string

@ -0,0 +1,47 @@
-
id: 1
repo_id: 1
name: 'foo'
commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'
commit_message: 'first commit'
commit_time: 978307100
pusher_id: 1
is_deleted: true
deleted_by_id: 1
deleted_unix: 978307200
-
id: 2
repo_id: 1
name: 'bar'
commit_id: '62fb502a7172d4453f0322a2cc85bddffa57f07a'
commit_message: 'second commit'
commit_time: 978307100
pusher_id: 1
is_deleted: true
deleted_by_id: 99
deleted_unix: 978307200
-
id: 3
repo_id: 1
name: 'branch2'
commit_id: '985f0301dba5e7b34be866819cd15ad3d8f508ee'
commit_message: 'make pull5 outdated'
commit_time: 1579166279
pusher_id: 1
is_deleted: false
deleted_by_id: 0
deleted_unix: 0
-
id: 4
repo_id: 1
name: 'master'
commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'
commit_message: 'Initial commit'
commit_time: 1489927679
pusher_id: 1
is_deleted: false
deleted_by_id: 0
deleted_unix: 0

@ -1,15 +0,0 @@
-
id: 1
repo_id: 1
name: foo
commit: 1213212312313213213132131
deleted_by_id: 1
deleted_unix: 978307200
-
id: 2
repo_id: 1
name: bar
commit: 5655464564554545466464655
deleted_by_id: 99
deleted_unix: 978307200

View File

@ -0,0 +1,49 @@
-
id: 1
repo_id: 5
interval: 3600
enable_prune: false
updated_unix: 0
next_update_unix: 0
lfs_enabled: false
lfs_endpoint: ""
-
id: 2
repo_id: 25
interval: 3600
enable_prune: false
updated_unix: 0
next_update_unix: 0
lfs_enabled: false
lfs_endpoint: ""
-
id: 3
repo_id: 26
interval: 3600
enable_prune: false
updated_unix: 0
next_update_unix: 0
lfs_enabled: false
lfs_endpoint: ""
-
id: 4
repo_id: 27
interval: 3600
enable_prune: false
updated_unix: 0
next_update_unix: 0
lfs_enabled: false
lfs_endpoint: ""
-
id: 5
repo_id: 28
interval: 3600
enable_prune: false
updated_unix: 0
next_update_unix: 0
lfs_enabled: false
lfs_endpoint: ""

View File

@ -81,3 +81,21 @@
uid: 5
org_id: 23
is_public: false
-
id: 15
uid: 1
org_id: 35
is_public: true
-
id: 16
uid: 1
org_id: 36
is_public: true
-
id: 17
uid: 5
org_id: 36
is_public: true

View File

@ -141,7 +141,7 @@
num_projects: 0
num_closed_projects: 0
is_private: true
is_empty: true
is_empty: false
is_archived: false
is_mirror: true
status: 0

View File

@ -184,3 +184,36 @@
num_members: 1
includes_all_repositories: false
can_create_org_repo: true
-
id: 18
org_id: 35
lower_name: owners
name: Owners
authorize: 4 # owner
num_repos: 0
num_members: 1
includes_all_repositories: false
can_create_org_repo: true
-
id: 19
org_id: 36
lower_name: owners
name: Owners
authorize: 4 # owner
num_repos: 0
num_members: 1
includes_all_repositories: false
can_create_org_repo: true
-
id: 20
org_id: 36
lower_name: team20writepackage
name: team20writepackage
authorize: 1
num_repos: 0
num_members: 1
includes_all_repositories: false
can_create_org_repo: true

View File

@ -273,4 +273,10 @@
id: 46
team_id: 17
type: 9 # package
access_mode: 0
access_mode: 2
-
id: 47
team_id: 20
type: 9 # package
access_mode: 2

View File

@ -105,3 +105,21 @@
org_id: 23
team_id: 17
uid: 5
-
id: 19
org_id: 35
team_id: 18
uid: 1
-
id: 20
org_id: 36
team_id: 19
uid: 1
-
id: 21
org_id: 36
team_id: 20
uid: 5

View File

@ -1258,3 +1258,77 @@
repo_admin_change_team_access: false
theme: ""
keep_activity_private: false
-
id: 35
lower_name: private_org35
name: private_org35
full_name: Private Org 35
email: private_org35@example.com
keep_email_private: false
email_notifications_preference: enabled
passwd: ZogKvWdyEx:password
passwd_hash_algo: dummy
must_change_password: false
login_source: 0
login_name: private_org35
type: 1
salt: ZogKvWdyEx
max_repo_creation: -1
is_active: true
is_admin: false
is_restricted: false
allow_git_hook: false
allow_import_local: false
allow_create_organization: true
prohibit_login: false
avatar: avatar35
avatar_email: private_org35@example.com
use_custom_avatar: false
num_followers: 0
num_following: 0
num_stars: 0
num_repos: 0
num_teams: 1
num_members: 1
visibility: 2
repo_admin_change_team_access: false
theme: ""
keep_activity_private: false
-
id: 36
lower_name: limited_org36
name: limited_org36
full_name: Limited Org 36
email: limited_org36@example.com
keep_email_private: false
email_notifications_preference: enabled
passwd: ZogKvWdyEx:password
passwd_hash_algo: dummy
must_change_password: false
login_source: 0
login_name: limited_org36
type: 1
salt: ZogKvWdyEx
max_repo_creation: -1
is_active: true
is_admin: false
is_restricted: false
allow_git_hook: false
allow_import_local: false
allow_create_organization: true
prohibit_login: false
avatar: avatar22
avatar_email: limited_org36@example.com
use_custom_avatar: false
num_followers: 0
num_following: 0
num_stars: 0
num_repos: 0
num_teams: 2
num_members: 2
visibility: 1
repo_admin_change_team_access: false
theme: ""
keep_activity_private: false

401
models/git/branch.go Normal file
View File

@ -0,0 +1,401 @@
// Copyright 2016 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package git
import (
"context"
"fmt"
"time"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
)
// ErrBranchNotExist represents an error that a branch with such a name does not exist.
type ErrBranchNotExist struct {
RepoID int64
BranchName string
}
// IsErrBranchNotExist checks if an error is an ErrBranchNotExist.
func IsErrBranchNotExist(err error) bool {
_, ok := err.(ErrBranchNotExist)
return ok
}
func (err ErrBranchNotExist) Error() string {
return fmt.Sprintf("branch does not exist [repo_id: %d name: %s]", err.RepoID, err.BranchName)
}
func (err ErrBranchNotExist) Unwrap() error {
return util.ErrNotExist
}
// ErrBranchAlreadyExists represents an error that a branch with such a name already exists.
type ErrBranchAlreadyExists struct {
BranchName string
}
// IsErrBranchAlreadyExists checks if an error is an ErrBranchAlreadyExists.
func IsErrBranchAlreadyExists(err error) bool {
_, ok := err.(ErrBranchAlreadyExists)
return ok
}
func (err ErrBranchAlreadyExists) Error() string {
return fmt.Sprintf("branch already exists [name: %s]", err.BranchName)
}
func (err ErrBranchAlreadyExists) Unwrap() error {
return util.ErrAlreadyExist
}
// ErrBranchNameConflict represents an error that a branch name conflicts with another branch.
type ErrBranchNameConflict struct {
BranchName string
}
// IsErrBranchNameConflict checks if an error is an ErrBranchNameConflict.
func IsErrBranchNameConflict(err error) bool {
_, ok := err.(ErrBranchNameConflict)
return ok
}
func (err ErrBranchNameConflict) Error() string {
return fmt.Sprintf("branch conflicts with existing branch [name: %s]", err.BranchName)
}
func (err ErrBranchNameConflict) Unwrap() error {
return util.ErrAlreadyExist
}
// ErrBranchesEqual represents an error that the base branch is equal to the head branch.
type ErrBranchesEqual struct {
BaseBranchName string
HeadBranchName string
}
// IsErrBranchesEqual checks if an error is an ErrBranchesEqual.
func IsErrBranchesEqual(err error) bool {
_, ok := err.(ErrBranchesEqual)
return ok
}
func (err ErrBranchesEqual) Error() string {
return fmt.Sprintf("branches are equal [head: %sm base: %s]", err.HeadBranchName, err.BaseBranchName)
}
func (err ErrBranchesEqual) Unwrap() error {
return util.ErrInvalidArgument
}
// Branch represents a branch of a repository
// For repositories with many branches, storing them in the database is a good choice
// for pagination, keyword search and filtering
type Branch struct {
ID int64
RepoID int64 `xorm:"UNIQUE(s)"`
Name string `xorm:"UNIQUE(s) NOT NULL"` // git's ref-name is case-sensitive internally; however, in some databases (MSSQL and MySQL by default) it is case-insensitive at the moment
CommitID string
CommitMessage string `xorm:"TEXT"` // it only stores the message summary (the first line)
PusherID int64
Pusher *user_model.User `xorm:"-"`
IsDeleted bool `xorm:"index"`
DeletedByID int64
DeletedBy *user_model.User `xorm:"-"`
DeletedUnix timeutil.TimeStamp `xorm:"index"`
CommitTime timeutil.TimeStamp // The commit time of the latest commit on this branch
CreatedUnix timeutil.TimeStamp `xorm:"created"`
UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
}
func (b *Branch) LoadDeletedBy(ctx context.Context) (err error) {
if b.DeletedBy == nil {
b.DeletedBy, err = user_model.GetUserByID(ctx, b.DeletedByID)
if user_model.IsErrUserNotExist(err) {
b.DeletedBy = user_model.NewGhostUser()
err = nil
}
}
return err
}
func (b *Branch) LoadPusher(ctx context.Context) (err error) {
if b.Pusher == nil && b.PusherID > 0 {
b.Pusher, err = user_model.GetUserByID(ctx, b.PusherID)
if user_model.IsErrUserNotExist(err) {
b.Pusher = user_model.NewGhostUser()
err = nil
}
}
return err
}
func init() {
db.RegisterModel(new(Branch))
db.RegisterModel(new(RenamedBranch))
}
func GetBranch(ctx context.Context, repoID int64, branchName string) (*Branch, error) {
var branch Branch
has, err := db.GetEngine(ctx).Where("repo_id=?", repoID).And("name=?", branchName).Get(&branch)
if err != nil {
return nil, err
} else if !has {
return nil, ErrBranchNotExist{
RepoID: repoID,
BranchName: branchName,
}
}
return &branch, nil
}
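A minimal caller sketch, not part of this patch: it shows how GetBranch pairs with the typed ErrBranchNotExist instead of string matching. The helper name, repository ID and branch name are hypothetical.

package branchexample

import (
	"context"
	"fmt"

	git_model "code.gitea.io/gitea/models/git"
)

// lookupBranch is a hypothetical helper that distinguishes "branch missing"
// from other database errors using the typed error defined above.
func lookupBranch(ctx context.Context, repoID int64, name string) (*git_model.Branch, error) {
	branch, err := git_model.GetBranch(ctx, repoID, name)
	if git_model.IsErrBranchNotExist(err) {
		return nil, fmt.Errorf("repo %d has no branch %q: %w", repoID, name, err)
	}
	return branch, err
}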
func AddBranches(ctx context.Context, branches []*Branch) error {
for _, branch := range branches {
if _, err := db.GetEngine(ctx).Insert(branch); err != nil {
return err
}
}
return nil
}
func GetDeletedBranchByID(ctx context.Context, repoID, branchID int64) (*Branch, error) {
var branch Branch
has, err := db.GetEngine(ctx).ID(branchID).Get(&branch)
if err != nil {
return nil, err
} else if !has {
return nil, ErrBranchNotExist{
RepoID: repoID,
}
}
if branch.RepoID != repoID {
return nil, ErrBranchNotExist{
RepoID: repoID,
}
}
if !branch.IsDeleted {
return nil, ErrBranchNotExist{
RepoID: repoID,
}
}
return &branch, nil
}
func DeleteBranches(ctx context.Context, repoID, doerID int64, branchIDs []int64) error {
return db.WithTx(ctx, func(ctx context.Context) error {
branches := make([]*Branch, 0, len(branchIDs))
if err := db.GetEngine(ctx).In("id", branchIDs).Find(&branches); err != nil {
return err
}
for _, branch := range branches {
if err := AddDeletedBranch(ctx, repoID, branch.Name, doerID); err != nil {
return err
}
}
return nil
})
}
// UpdateBranch updates the branch information in the database. If the branch exists, it updates the latest commit information of that branch;
// if it does not exist, it inserts a new record into the database
func UpdateBranch(ctx context.Context, repoID, pusherID int64, branchName string, commit *git.Commit) error {
cnt, err := db.GetEngine(ctx).Where("repo_id=? AND name=?", repoID, branchName).
Cols("commit_id, commit_message, pusher_id, commit_time, is_deleted, updated_unix").
Update(&Branch{
CommitID: commit.ID.String(),
CommitMessage: commit.Summary(),
PusherID: pusherID,
CommitTime: timeutil.TimeStamp(commit.Committer.When.Unix()),
IsDeleted: false,
})
if err != nil {
return err
}
if cnt > 0 {
return nil
}
return db.Insert(ctx, &Branch{
RepoID: repoID,
Name: branchName,
CommitID: commit.ID.String(),
CommitMessage: commit.Summary(),
PusherID: pusherID,
CommitTime: timeutil.TimeStamp(commit.Committer.When.Unix()),
})
}
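A hedged sketch of the upsert behaviour described above, mirroring how the unit test later in this diff builds a *git.Commit. The SHA, message and helper name are placeholders.

package branchexample

import (
	"context"
	"time"

	git_model "code.gitea.io/gitea/models/git"
	"code.gitea.io/gitea/modules/git"
)

// recordPush is hypothetical: it upserts the branch row after a push.
func recordPush(ctx context.Context, repoID, pusherID int64, name string) error {
	commit := &git.Commit{
		ID:            git.MustIDFromString("65f1bf27bc3bf70f64657658635e66094edbcb4d"), // placeholder SHA
		CommitMessage: "example commit message",
		Committer:     &git.Signature{When: time.Now()},
	}
	// Updates the existing row if the branch is already tracked,
	// otherwise inserts a new one.
	return git_model.UpdateBranch(ctx, repoID, pusherID, name, commit)
}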
// AddDeletedBranch marks an existing branch as deleted in the database
func AddDeletedBranch(ctx context.Context, repoID int64, branchName string, deletedByID int64) error {
branch, err := GetBranch(ctx, repoID, branchName)
if err != nil {
return err
}
if branch.IsDeleted {
return nil
}
cnt, err := db.GetEngine(ctx).Where("repo_id=? AND name=? AND is_deleted=?", repoID, branchName, false).
Cols("is_deleted, deleted_by_id, deleted_unix").
Update(&Branch{
IsDeleted: true,
DeletedByID: deletedByID,
DeletedUnix: timeutil.TimeStampNow(),
})
if err != nil {
return err
}
if cnt == 0 {
return fmt.Errorf("branch %s not found or has been deleted", branchName)
}
return err
}
func RemoveDeletedBranchByID(ctx context.Context, repoID, branchID int64) error {
_, err := db.GetEngine(ctx).Where("repo_id=? AND id=? AND is_deleted = ?", repoID, branchID, true).Delete(new(Branch))
return err
}
// RemoveOldDeletedBranches removes old deleted branches
func RemoveOldDeletedBranches(ctx context.Context, olderThan time.Duration) {
// Nothing to do for shutdown or terminate
log.Trace("Doing: DeletedBranchesCleanup")
deleteBefore := time.Now().Add(-olderThan)
_, err := db.GetEngine(ctx).Where("is_deleted=? AND deleted_unix < ?", true, deleteBefore.Unix()).Delete(new(Branch))
if err != nil {
log.Error("DeletedBranchesCleanup: %v", err)
}
}
// RenamedBranch provides a renamed branch log;
// it is checked when a branch can't be found
type RenamedBranch struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"INDEX NOT NULL"`
From string
To string
CreatedUnix timeutil.TimeStamp `xorm:"created"`
}
// FindRenamedBranch checks if a branch was renamed
func FindRenamedBranch(ctx context.Context, repoID int64, from string) (branch *RenamedBranch, exist bool, err error) {
branch = &RenamedBranch{
RepoID: repoID,
From: from,
}
exist, err = db.GetEngine(ctx).Get(branch)
return branch, exist, err
}
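A minimal sketch, assumed rather than taken from the patch, of the lookup-with-rename-fallback the comment above describes: when a branch is gone, check whether it was renamed and retry under the new name.

package branchexample

import (
	"context"

	git_model "code.gitea.io/gitea/models/git"
)

// resolveBranch is a hypothetical helper: it falls back to the rename log
// when the requested branch no longer exists.
func resolveBranch(ctx context.Context, repoID int64, name string) (*git_model.Branch, error) {
	branch, err := git_model.GetBranch(ctx, repoID, name)
	if !git_model.IsErrBranchNotExist(err) {
		return branch, err
	}
	renamed, exist, rerr := git_model.FindRenamedBranch(ctx, repoID, name)
	if rerr != nil || !exist {
		return nil, err // keep the original "does not exist" error
	}
	return git_model.GetBranch(ctx, repoID, renamed.To)
}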
// RenameBranch renames a branch
func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to string, gitAction func(isDefault bool) error) (err error) {
ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
}
defer committer.Close()
sess := db.GetEngine(ctx)
// 1. update branch in database
if n, err := sess.Where("repo_id=? AND name=?", repo.ID, from).Update(&Branch{
Name: to,
}); err != nil {
return err
} else if n <= 0 {
return ErrBranchNotExist{
RepoID: repo.ID,
BranchName: from,
}
}
// 2. update default branch if needed
isDefault := repo.DefaultBranch == from
if isDefault {
repo.DefaultBranch = to
_, err = sess.ID(repo.ID).Cols("default_branch").Update(repo)
if err != nil {
return err
}
}
// 3. Update protected branch if needed
protectedBranch, err := GetProtectedBranchRuleByName(ctx, repo.ID, from)
if err != nil {
return err
}
if protectedBranch != nil {
// there is a protect rule for this branch
protectedBranch.RuleName = to
_, err = sess.ID(protectedBranch.ID).Cols("branch_name").Update(protectedBranch)
if err != nil {
return err
}
} else {
// some glob protect rules may match this branch
protected, err := IsBranchProtected(ctx, repo.ID, from)
if err != nil {
return err
}
if protected {
return ErrBranchIsProtected
}
}
// 4. Update all not merged pull request base branch name
_, err = sess.Table("pull_request").Where("base_repo_id=? AND base_branch=? AND has_merged=?",
repo.ID, from, false).
Update(map[string]any{"base_branch": to})
if err != nil {
return err
}
// 5. do git action
if err = gitAction(isDefault); err != nil {
return err
}
// 6. insert renamed branch record
renamedBranch := &RenamedBranch{
RepoID: repo.ID,
From: from,
To: to,
}
err = db.Insert(ctx, renamedBranch)
if err != nil {
return err
}
return committer.Commit()
}
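A hedged sketch of calling RenameBranch: the database updates above run in one transaction, and the actual git-level rename happens inside the callback, which is only a stub here. The branch names and helper are illustrative.

package branchexample

import (
	"context"

	git_model "code.gitea.io/gitea/models/git"
	repo_model "code.gitea.io/gitea/models/repo"
)

// renameExample is hypothetical: it renames "develop" to "main" and leaves
// the real git ref move to the callback.
func renameExample(ctx context.Context, repo *repo_model.Repository) error {
	return git_model.RenameBranch(ctx, repo, "develop", "main", func(isDefault bool) error {
		// Perform the real git rename here (e.g. via the repository service);
		// isDefault reports whether the renamed branch was the default branch.
		return nil
	})
}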
// FindRecentlyPushedNewBranches returns at most 2 new branches pushed by the user within the last 6 hours that have no open PRs
func FindRecentlyPushedNewBranches(ctx context.Context, repoID, userID int64) (BranchList, error) {
branches := make(BranchList, 0, 2)
subQuery := builder.Select("head_branch").From("pull_request").
InnerJoin("issue", "issue.id = pull_request.issue_id").
Where(builder.Eq{
"pull_request.head_repo_id": repoID,
"issue.is_closed": false,
})
err := db.GetEngine(ctx).
Where("pusher_id=? AND is_deleted=?", userID, false).
And("updated_unix >= ?", time.Now().Add(-time.Hour*6).Unix()).
NotIn("name", subQuery).
OrderBy("branch.updated_unix DESC").
Limit(2).
Find(&branches)
return branches, err
}
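A short assumed usage of the helper above, for example to offer a "create pull request" hint for a user's fresh branches. The function name is hypothetical.

package branchexample

import (
	"context"
	"fmt"

	git_model "code.gitea.io/gitea/models/git"
)

// suggestPullRequests is hypothetical: it lists up to two recently pushed
// branches without an open PR so the UI can prompt the pusher.
func suggestPullRequests(ctx context.Context, repoID, userID int64) error {
	branches, err := git_model.FindRecentlyPushedNewBranches(ctx, repoID, userID)
	if err != nil {
		return err
	}
	for _, b := range branches {
		fmt.Printf("branch %s has no open pull request yet\n", b.Name)
	}
	return nil
}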

145
models/git/branch_list.go Normal file
View File

@ -0,0 +1,145 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package git
import (
"context"
"code.gitea.io/gitea/models/db"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
"xorm.io/xorm"
)
type BranchList []*Branch
func (branches BranchList) LoadDeletedBy(ctx context.Context) error {
ids := container.Set[int64]{}
for _, branch := range branches {
if !branch.IsDeleted {
continue
}
ids.Add(branch.DeletedByID)
}
usersMap := make(map[int64]*user_model.User, len(ids))
if err := db.GetEngine(ctx).In("id", ids.Values()).Find(&usersMap); err != nil {
return err
}
for _, branch := range branches {
if !branch.IsDeleted {
continue
}
branch.DeletedBy = usersMap[branch.DeletedByID]
if branch.DeletedBy == nil {
branch.DeletedBy = user_model.NewGhostUser()
}
}
return nil
}
func (branches BranchList) LoadPusher(ctx context.Context) error {
ids := container.Set[int64]{}
for _, branch := range branches {
if branch.PusherID > 0 { // pusher_id may be zero because some branches are synced by the backend with no pusher
ids.Add(branch.PusherID)
}
}
usersMap := make(map[int64]*user_model.User, len(ids))
if err := db.GetEngine(ctx).In("id", ids.Values()).Find(&usersMap); err != nil {
return err
}
for _, branch := range branches {
if branch.PusherID <= 0 {
continue
}
branch.Pusher = usersMap[branch.PusherID]
if branch.Pusher == nil {
branch.Pusher = user_model.NewGhostUser()
}
}
return nil
}
type FindBranchOptions struct {
db.ListOptions
RepoID int64
ExcludeBranchNames []string
IsDeletedBranch util.OptionalBool
OrderBy string
}
func (opts *FindBranchOptions) Cond() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
}
if len(opts.ExcludeBranchNames) > 0 {
cond = cond.And(builder.NotIn("name", opts.ExcludeBranchNames))
}
if !opts.IsDeletedBranch.IsNone() {
cond = cond.And(builder.Eq{"is_deleted": opts.IsDeletedBranch.IsTrue()})
}
return cond
}
func CountBranches(ctx context.Context, opts FindBranchOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.Cond()).Count(&Branch{})
}
func orderByBranches(sess *xorm.Session, opts FindBranchOptions) *xorm.Session {
if !opts.IsDeletedBranch.IsFalse() { // if deleted branches are included, put them at the end
sess = sess.OrderBy("is_deleted ASC")
}
if opts.OrderBy == "" {
// the commit_time might be the same, so add the "name" to make sure the order is stable
opts.OrderBy = "commit_time DESC, name ASC"
}
return sess.OrderBy(opts.OrderBy)
}
func FindBranches(ctx context.Context, opts FindBranchOptions) (BranchList, error) {
sess := db.GetEngine(ctx).Where(opts.Cond())
if opts.PageSize > 0 && !opts.IsListAll() {
sess = db.SetSessionPagination(sess, &opts.ListOptions)
}
sess = orderByBranches(sess, opts)
var branches []*Branch
return branches, sess.Find(&branches)
}
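A sketch, with assumed page size and helper name, of combining FindBranchOptions, CountBranches and FindBranches to render one page of live branches; it mirrors the options used in the test changes later in this diff.

package branchexample

import (
	"context"

	"code.gitea.io/gitea/models/db"
	git_model "code.gitea.io/gitea/models/git"
	"code.gitea.io/gitea/modules/util"
)

// listBranchPage is hypothetical: it fetches one page of non-deleted branches
// plus the total count for pagination.
func listBranchPage(ctx context.Context, repoID int64, page int) (git_model.BranchList, int64, error) {
	opts := git_model.FindBranchOptions{
		ListOptions:     db.ListOptions{Page: page, PageSize: 20},
		RepoID:          repoID,
		IsDeletedBranch: util.OptionalBoolFalse,
	}
	total, err := git_model.CountBranches(ctx, opts)
	if err != nil {
		return nil, 0, err
	}
	branches, err := git_model.FindBranches(ctx, opts)
	return branches, total, err
}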
func FindBranchNames(ctx context.Context, opts FindBranchOptions) ([]string, error) {
sess := db.GetEngine(ctx).Select("name").Where(opts.Cond())
if opts.PageSize > 0 && !opts.IsListAll() {
sess = db.SetSessionPagination(sess, &opts.ListOptions)
}
sess = orderByBranches(sess, opts)
var branches []string
if err := sess.Table("branch").Find(&branches); err != nil {
return nil, err
}
return branches, nil
}
func FindBranchesByRepoAndBranchName(ctx context.Context, repoBranches map[int64]string) (map[int64]string, error) {
cond := builder.NewCond()
for repoID, branchName := range repoBranches {
cond = cond.Or(builder.And(builder.Eq{"repo_id": repoID}, builder.Eq{"name": branchName}))
}
var branches []*Branch
if err := db.GetEngine(ctx).
Where(cond).Find(&branches); err != nil {
return nil, err
}
branchMap := make(map[int64]string, len(branches))
for _, branch := range branches {
branchMap[branch.RepoID] = branch.CommitID
}
return branchMap, nil
}

View File

@ -11,6 +11,8 @@ import (
issues_model "code.gitea.io/gitea/models/issues"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/util"
"github.com/stretchr/testify/assert"
)
@ -18,24 +20,45 @@ import (
func TestAddDeletedBranch(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 1})
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
assert.Error(t, git_model.AddDeletedBranch(db.DefaultContext, repo.ID, firstBranch.Name, firstBranch.Commit, firstBranch.DeletedByID))
assert.NoError(t, git_model.AddDeletedBranch(db.DefaultContext, repo.ID, "test", "5655464564554545466464656", int64(1)))
assert.True(t, firstBranch.IsDeleted)
assert.NoError(t, git_model.AddDeletedBranch(db.DefaultContext, repo.ID, firstBranch.Name, firstBranch.DeletedByID))
assert.NoError(t, git_model.AddDeletedBranch(db.DefaultContext, repo.ID, "branch2", int64(1)))
secondBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: repo.ID, Name: "branch2"})
assert.True(t, secondBranch.IsDeleted)
commit := &git.Commit{
ID: git.MustIDFromString(secondBranch.CommitID),
CommitMessage: secondBranch.CommitMessage,
Committer: &git.Signature{
When: secondBranch.CommitTime.AsLocalTime(),
},
}
err := git_model.UpdateBranch(db.DefaultContext, repo.ID, secondBranch.PusherID, secondBranch.Name, commit)
assert.NoError(t, err)
}
func TestGetDeletedBranches(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
branches, err := git_model.GetDeletedBranches(db.DefaultContext, repo.ID)
branches, err := git_model.FindBranches(db.DefaultContext, git_model.FindBranchOptions{
ListOptions: db.ListOptions{
ListAll: true,
},
RepoID: repo.ID,
IsDeletedBranch: util.OptionalBoolTrue,
})
assert.NoError(t, err)
assert.Len(t, branches, 2)
}
func TestGetDeletedBranch(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 1})
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
assert.NotNil(t, getDeletedBranch(t, firstBranch))
}
@ -43,18 +66,18 @@ func TestGetDeletedBranch(t *testing.T) {
func TestDeletedBranchLoadUser(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 1})
secondBranch := unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 2})
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
secondBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 2})
branch := getDeletedBranch(t, firstBranch)
assert.Nil(t, branch.DeletedBy)
branch.LoadUser(db.DefaultContext)
branch.LoadDeletedBy(db.DefaultContext)
assert.NotNil(t, branch.DeletedBy)
assert.Equal(t, "user1", branch.DeletedBy.Name)
branch = getDeletedBranch(t, secondBranch)
assert.Nil(t, branch.DeletedBy)
branch.LoadUser(db.DefaultContext)
branch.LoadDeletedBy(db.DefaultContext)
assert.NotNil(t, branch.DeletedBy)
assert.Equal(t, "Ghost", branch.DeletedBy.Name)
}
@ -63,22 +86,22 @@ func TestRemoveDeletedBranch(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 1})
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
err := git_model.RemoveDeletedBranchByID(db.DefaultContext, repo.ID, 1)
assert.NoError(t, err)
unittest.AssertNotExistsBean(t, firstBranch)
unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 2})
unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 2})
}
func getDeletedBranch(t *testing.T, branch *git_model.DeletedBranch) *git_model.DeletedBranch {
func getDeletedBranch(t *testing.T, branch *git_model.Branch) *git_model.Branch {
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
deletedBranch, err := git_model.GetDeletedBranchByID(db.DefaultContext, repo.ID, branch.ID)
assert.NoError(t, err)
assert.Equal(t, branch.ID, deletedBranch.ID)
assert.Equal(t, branch.Name, deletedBranch.Name)
assert.Equal(t, branch.Commit, deletedBranch.Commit)
assert.Equal(t, branch.CommitID, deletedBranch.CommitID)
assert.Equal(t, branch.DeletedByID, deletedBranch.DeletedByID)
return deletedBranch
@ -146,8 +169,8 @@ func TestOnlyGetDeletedBranchOnCorrectRepo(t *testing.T) {
deletedBranch, err := git_model.GetDeletedBranchByID(db.DefaultContext, repo2.ID, 1)
// Expect no error, and the returned branch is nil.
assert.NoError(t, err)
// Expect error, and the returned branch is nil.
assert.Error(t, err)
assert.Nil(t, deletedBranch)
// Now get the deletedBranch with ID of 1 on repo with ID 1.

View File

@ -1,197 +0,0 @@
// Copyright 2016 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package git
import (
"context"
"fmt"
"time"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
)
// DeletedBranch struct
type DeletedBranch struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
Name string `xorm:"UNIQUE(s) NOT NULL"`
Commit string `xorm:"UNIQUE(s) NOT NULL"`
DeletedByID int64 `xorm:"INDEX"`
DeletedBy *user_model.User `xorm:"-"`
DeletedUnix timeutil.TimeStamp `xorm:"INDEX created"`
}
func init() {
db.RegisterModel(new(DeletedBranch))
db.RegisterModel(new(RenamedBranch))
}
// AddDeletedBranch adds a deleted branch to the database
func AddDeletedBranch(ctx context.Context, repoID int64, branchName, commit string, deletedByID int64) error {
deletedBranch := &DeletedBranch{
RepoID: repoID,
Name: branchName,
Commit: commit,
DeletedByID: deletedByID,
}
_, err := db.GetEngine(ctx).Insert(deletedBranch)
return err
}
// GetDeletedBranches returns all the deleted branches
func GetDeletedBranches(ctx context.Context, repoID int64) ([]*DeletedBranch, error) {
deletedBranches := make([]*DeletedBranch, 0)
return deletedBranches, db.GetEngine(ctx).Where("repo_id = ?", repoID).Desc("deleted_unix").Find(&deletedBranches)
}
// GetDeletedBranchByID get a deleted branch by its ID
func GetDeletedBranchByID(ctx context.Context, repoID, id int64) (*DeletedBranch, error) {
deletedBranch := &DeletedBranch{}
has, err := db.GetEngine(ctx).Where("repo_id = ?", repoID).And("id = ?", id).Get(deletedBranch)
if err != nil {
return nil, err
}
if !has {
return nil, nil
}
return deletedBranch, nil
}
// RemoveDeletedBranchByID removes a deleted branch from the database
func RemoveDeletedBranchByID(ctx context.Context, repoID, id int64) (err error) {
deletedBranch := &DeletedBranch{
RepoID: repoID,
ID: id,
}
if affected, err := db.GetEngine(ctx).Delete(deletedBranch); err != nil {
return err
} else if affected != 1 {
return fmt.Errorf("remove deleted branch ID(%v) failed", id)
}
return nil
}
// LoadUser loads the user that deleted the branch
// When there's no user found it returns a user_model.NewGhostUser
func (deletedBranch *DeletedBranch) LoadUser(ctx context.Context) {
user, err := user_model.GetUserByID(ctx, deletedBranch.DeletedByID)
if err != nil {
user = user_model.NewGhostUser()
}
deletedBranch.DeletedBy = user
}
// RemoveDeletedBranchByName removes all deleted branches
func RemoveDeletedBranchByName(ctx context.Context, repoID int64, branch string) error {
_, err := db.GetEngine(ctx).Where("repo_id=? AND name=?", repoID, branch).Delete(new(DeletedBranch))
return err
}
// RemoveOldDeletedBranches removes old deleted branches
func RemoveOldDeletedBranches(ctx context.Context, olderThan time.Duration) {
// Nothing to do for shutdown or terminate
log.Trace("Doing: DeletedBranchesCleanup")
deleteBefore := time.Now().Add(-olderThan)
_, err := db.GetEngine(ctx).Where("deleted_unix < ?", deleteBefore.Unix()).Delete(new(DeletedBranch))
if err != nil {
log.Error("DeletedBranchesCleanup: %v", err)
}
}
// RenamedBranch provide renamed branch log
// will check it when a branch can't be found
type RenamedBranch struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"INDEX NOT NULL"`
From string
To string
CreatedUnix timeutil.TimeStamp `xorm:"created"`
}
// FindRenamedBranch check if a branch was renamed
func FindRenamedBranch(ctx context.Context, repoID int64, from string) (branch *RenamedBranch, exist bool, err error) {
branch = &RenamedBranch{
RepoID: repoID,
From: from,
}
exist, err = db.GetEngine(ctx).Get(branch)
return branch, exist, err
}
// RenameBranch rename a branch
func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to string, gitAction func(isDefault bool) error) (err error) {
ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
}
defer committer.Close()
sess := db.GetEngine(ctx)
// 1. update default branch if needed
isDefault := repo.DefaultBranch == from
if isDefault {
repo.DefaultBranch = to
_, err = sess.ID(repo.ID).Cols("default_branch").Update(repo)
if err != nil {
return err
}
}
// 2. Update protected branch if needed
protectedBranch, err := GetProtectedBranchRuleByName(ctx, repo.ID, from)
if err != nil {
return err
}
if protectedBranch != nil {
protectedBranch.RuleName = to
_, err = sess.ID(protectedBranch.ID).Cols("branch_name").Update(protectedBranch)
if err != nil {
return err
}
} else {
protected, err := IsBranchProtected(ctx, repo.ID, from)
if err != nil {
return err
}
if protected {
return ErrBranchIsProtected
}
}
// 3. Update all not merged pull request base branch name
_, err = sess.Table("pull_request").Where("base_repo_id=? AND base_branch=? AND has_merged=?",
repo.ID, from, false).
Update(map[string]interface{}{"base_branch": to})
if err != nil {
return err
}
// 4. do git action
if err = gitAction(isDefault); err != nil {
return err
}
// 5. insert renamed branch record
renamedBranch := &RenamedBranch{
RepoID: repo.ID,
From: from,
To: to,
}
err = db.Insert(ctx, renamedBranch)
if err != nil {
return err
}
return committer.Commit()
}

View File

@ -346,6 +346,53 @@ func GetLatestCommitStatusForPairs(ctx context.Context, repoIDsToLatestCommitSHA
return repoStatuses, nil
}
// GetLatestCommitStatusForRepoCommitIDs returns the latest statuses (one per unique context) for the given repository and list of commit IDs
func GetLatestCommitStatusForRepoCommitIDs(ctx context.Context, repoID int64, commitIDs []string) (map[string][]*CommitStatus, error) {
type result struct {
ID int64
Sha string
}
results := make([]result, 0, len(commitIDs))
sess := db.GetEngine(ctx).Table(&CommitStatus{})
// Build a disjunction of SHA conditions, scoped to the given repository
conds := make([]builder.Cond, 0, len(commitIDs))
for _, sha := range commitIDs {
conds = append(conds, builder.Eq{"sha": sha})
}
sess = sess.Where(builder.Eq{"repo_id": repoID}.And(builder.Or(conds...))).
Select("max( id ) as id, sha").
GroupBy("context_hash, sha").OrderBy("max( id ) desc")
err := sess.Find(&results)
if err != nil {
return nil, err
}
ids := make([]int64, 0, len(results))
repoStatuses := make(map[string][]*CommitStatus)
for _, result := range results {
ids = append(ids, result.ID)
}
statuses := make([]*CommitStatus, 0, len(ids))
if len(ids) > 0 {
err = db.GetEngine(ctx).In("id", ids).Find(&statuses)
if err != nil {
return nil, err
}
// Group the statuses by commit SHA
for _, status := range statuses {
repoStatuses[status.SHA] = append(repoStatuses[status.SHA], status)
}
}
return repoStatuses, nil
}
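A brief assumed caller: it fetches the latest statuses for a batch of SHAs and picks out the ones for a single commit. The SHA value and helper name are placeholders.

package branchexample

import (
	"context"

	git_model "code.gitea.io/gitea/models/git"
)

// latestStatusesFor is hypothetical: it returns the latest statuses recorded
// for one commit out of a batch lookup.
func latestStatusesFor(ctx context.Context, repoID int64, sha string) ([]*git_model.CommitStatus, error) {
	bySHA, err := git_model.GetLatestCommitStatusForRepoCommitIDs(ctx, repoID, []string{sha})
	if err != nil {
		return nil, err
	}
	return bySHA[sha], nil
}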
// FindRepoRecentCommitStatusContexts returns repository's recent commit status contexts
func FindRepoRecentCommitStatusContexts(ctx context.Context, repoID int64, before time.Duration) ([]string, error) {
start := timeutil.TimeStampNow().AddDuration(-before)

View File

@ -264,7 +264,7 @@ func LFSAutoAssociate(ctx context.Context, metas []*LFSMetaObject, user *user_mo
sess := db.GetEngine(ctx)
oids := make([]interface{}, len(metas))
oids := make([]any, len(metas))
oidMap := make(map[string]*LFSMetaObject, len(metas))
for i, meta := range metas {
oids[i] = meta.Oid

View File

@ -8,7 +8,7 @@ import (
"sort"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/util"
"github.com/gobwas/glob"
)
@ -47,19 +47,32 @@ func FindRepoProtectedBranchRules(ctx context.Context, repoID int64) (ProtectedB
}
// FindAllMatchedBranches finds all matched branches
func FindAllMatchedBranches(ctx context.Context, gitRepo *git.Repository, ruleName string) ([]string, error) {
// FIXME: how many should we get?
branches, _, err := gitRepo.GetBranchNames(0, 9999999)
if err != nil {
return nil, err
}
rule := glob.MustCompile(ruleName)
results := make([]string, 0, len(branches))
for _, branch := range branches {
if rule.Match(branch) {
results = append(results, branch)
func FindAllMatchedBranches(ctx context.Context, repoID int64, ruleName string) ([]string, error) {
results := make([]string, 0, 10)
for page := 1; ; page++ {
branchNames, err := FindBranchNames(ctx, FindBranchOptions{
ListOptions: db.ListOptions{
PageSize: 100,
Page: page,
},
RepoID: repoID,
IsDeletedBranch: util.OptionalBoolFalse,
})
if err != nil {
return nil, err
}
rule := glob.MustCompile(ruleName)
for _, branch := range branchNames {
if rule.Match(branch) {
results = append(results, branch)
}
}
if len(branchNames) < 100 {
break
}
}
return results, nil
}
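A small assumed example of resolving a glob protection rule against the stored branches; the rule pattern "release/*" and the function name are illustrative only.

package branchexample

import (
	"context"
	"fmt"

	git_model "code.gitea.io/gitea/models/git"
)

// matchedReleaseBranches is hypothetical: it expands a glob rule such as
// "release/*" into the concrete branch names it currently matches.
func matchedReleaseBranches(ctx context.Context, repoID int64) error {
	names, err := git_model.FindAllMatchedBranches(ctx, repoID, "release/*")
	if err != nil {
		return err
	}
	fmt.Println("protected by rule release/*:", names)
	return nil
}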

View File

@ -752,7 +752,7 @@ func (c *Comment) LoadPushCommits(ctx context.Context) (err error) {
err = json.Unmarshal([]byte(c.Content), &data)
if err != nil {
return
return err
}
c.IsForcePush = data.IsForcePush
@ -928,7 +928,7 @@ func createIssueDependencyComment(ctx context.Context, doer *user_model.User, is
cType = CommentTypeRemoveDependency
}
if err = issue.LoadRepo(ctx); err != nil {
return
return err
}
// Make two comments, one in each issue
@ -940,7 +940,7 @@ func createIssueDependencyComment(ctx context.Context, doer *user_model.User, is
DependentIssueID: dependentIssue.ID,
}
if _, err = CreateComment(ctx, opts); err != nil {
return
return err
}
opts = &CreateCommentOptions{
@ -1134,7 +1134,7 @@ func DeleteComment(ctx context.Context, comment *Comment) error {
}
if _, err := e.Table("action").
Where("comment_id = ?", comment.ID).
Update(map[string]interface{}{
Update(map[string]any{
"is_deleted": true,
}); err != nil {
return err
@ -1159,7 +1159,7 @@ func UpdateCommentsMigrationsByType(tp structs.GitServiceType, originalAuthorID
}),
)).
And("comment.original_author_id = ?", originalAuthorID).
Update(map[string]interface{}{
Update(map[string]any{
"poster_id": posterID,
"original_author": "",
"original_author_id": 0,
@ -1173,11 +1173,11 @@ func CreateAutoMergeComment(ctx context.Context, typ CommentType, pr *PullReques
return nil, fmt.Errorf("comment type %d cannot be used to create an auto merge comment", typ)
}
if err = pr.LoadIssue(ctx); err != nil {
return
return nil, err
}
if err = pr.LoadBaseRepo(ctx); err != nil {
return
return nil, err
}
comment, err = CreateComment(ctx, &CreateCommentOptions{

View File

@ -468,42 +468,38 @@ func (comments CommentList) loadReviews(ctx context.Context) error {
// loadAttributes loads all attributes
func (comments CommentList) loadAttributes(ctx context.Context) (err error) {
if err = comments.LoadPosters(ctx); err != nil {
return
return err
}
if err = comments.loadLabels(ctx); err != nil {
return
return err
}
if err = comments.loadMilestones(ctx); err != nil {
return
return err
}
if err = comments.loadOldMilestones(ctx); err != nil {
return
return err
}
if err = comments.loadAssignees(ctx); err != nil {
return
return err
}
if err = comments.LoadAttachments(ctx); err != nil {
return
return err
}
if err = comments.loadReviews(ctx); err != nil {
return
return err
}
if err = comments.LoadIssues(ctx); err != nil {
return
return err
}
if err = comments.loadDependentIssues(ctx); err != nil {
return
}
return nil
return comments.loadDependentIssues(ctx)
}
// LoadAttributes loads attributes of the comments, except for attachments and

View File

@ -225,8 +225,7 @@ func (issue *Issue) LoadPoster(ctx context.Context) (err error) {
if !user_model.IsErrUserNotExist(err) {
return fmt.Errorf("getUserByID.(poster) [%d]: %w", issue.PosterID, err)
}
err = nil
return
return nil
}
}
return err
@ -319,27 +318,27 @@ func (issue *Issue) LoadMilestone(ctx context.Context) (err error) {
// LoadAttributes loads the attribute of this issue.
func (issue *Issue) LoadAttributes(ctx context.Context) (err error) {
if err = issue.LoadRepo(ctx); err != nil {
return
return err
}
if err = issue.LoadPoster(ctx); err != nil {
return
return err
}
if err = issue.LoadLabels(ctx); err != nil {
return
return err
}
if err = issue.LoadMilestone(ctx); err != nil {
return
return err
}
if err = issue.LoadProject(ctx); err != nil {
return
return err
}
if err = issue.LoadAssignees(ctx); err != nil {
return
return err
}
if err = issue.LoadPullRequest(ctx); err != nil && !IsErrPullRequestNotExist(err) {
@ -717,7 +716,7 @@ func (issue *Issue) Pin(ctx context.Context, user *user_model.User) error {
_, err = db.GetEngine(ctx).Table("issue").
Where("id = ?", issue.ID).
Update(map[string]interface{}{
Update(map[string]any{
"pin_order": maxPin + 1,
})
if err != nil {
@ -753,7 +752,7 @@ func (issue *Issue) Unpin(ctx context.Context, user *user_model.User) error {
_, err = db.GetEngine(ctx).Table("issue").
Where("id = ?", issue.ID).
Update(map[string]interface{}{
Update(map[string]any{
"pin_order": 0,
})
if err != nil {
@ -825,7 +824,7 @@ func (issue *Issue) MovePin(ctx context.Context, newPosition int) error {
_, err = db.GetEngine(dbctx).Table("issue").
Where("id = ?", issue.ID).
Update(map[string]interface{}{
Update(map[string]any{
"pin_order": newPosition,
})
if err != nil {

View File

@ -39,7 +39,7 @@ func newIssueLabel(ctx context.Context, issue *Issue, label *Label, doer *user_m
}
if err = issue.LoadRepo(ctx); err != nil {
return
return err
}
opts := &CreateCommentOptions{
@ -168,7 +168,7 @@ func deleteIssueLabel(ctx context.Context, issue *Issue, label *Label, doer *use
}
if err = issue.LoadRepo(ctx); err != nil {
return
return err
}
opts := &CreateCommentOptions{

View File

@ -511,7 +511,7 @@ func UpdateIssueDeadline(issue *Issue, deadlineUnix timeutil.TimeStamp, doer *us
}
// DeleteInIssue deletes records in beans with external key issue_id = ?
func DeleteInIssue(ctx context.Context, issueID int64, beans ...interface{}) error {
func DeleteInIssue(ctx context.Context, issueID int64, beans ...any) error {
e := db.GetEngine(ctx)
for _, bean := range beans {
if _, err := e.In("issue_id", issueID).Delete(bean); err != nil {
@ -538,10 +538,10 @@ func FindAndUpdateIssueMentions(ctx context.Context, issue *Issue, doer *user_mo
// don't have access to reading it. Teams are expanded into their users, but organizations are ignored.
func ResolveIssueMentionsByVisibility(ctx context.Context, issue *Issue, doer *user_model.User, mentions []string) (users []*user_model.User, err error) {
if len(mentions) == 0 {
return
return nil, nil
}
if err = issue.LoadRepo(ctx); err != nil {
return
return nil, err
}
resolved := make(map[string]bool, 10)
@ -635,7 +635,7 @@ func ResolveIssueMentionsByVisibility(ctx context.Context, issue *Issue, doer *u
}
}
if len(mentionUsers) == 0 {
return
return users, err
}
if users == nil {
@ -673,7 +673,7 @@ func UpdateIssuesMigrationsByType(gitServiceType api.GitServiceType, originalAut
_, err := db.GetEngine(db.DefaultContext).Table("issue").
Where("repo_id IN (SELECT id FROM repository WHERE original_service_type = ?)", gitServiceType).
And("original_author_id = ?", originalAuthorID).
Update(map[string]interface{}{
Update(map[string]any{
"poster_id": posterID,
"original_author": "",
"original_author_id": 0,
@ -686,7 +686,7 @@ func UpdateReactionsMigrationsByType(gitServiceType api.GitServiceType, original
_, err := db.GetEngine(db.DefaultContext).Table("reaction").
Where("original_author_id = ?", originalAuthorID).
And(migratedIssueCond(gitServiceType)).
Update(map[string]interface{}{
Update(map[string]any{
"user_id": userID,
"original_author": "",
"original_author_id": 0,
@ -702,66 +702,66 @@ func DeleteIssuesByRepoID(ctx context.Context, repoID int64) (attachmentPaths []
// Delete content histories
if _, err = sess.In("issue_id", deleteCond).
Delete(&ContentHistory{}); err != nil {
return
return nil, err
}
// Delete comments and attachments
if _, err = sess.In("issue_id", deleteCond).
Delete(&Comment{}); err != nil {
return
return nil, err
}
// Dependencies for issues in this repository
if _, err = sess.In("issue_id", deleteCond).
Delete(&IssueDependency{}); err != nil {
return
return nil, err
}
// Delete dependencies for issues in other repositories
if _, err = sess.In("dependency_id", deleteCond).
Delete(&IssueDependency{}); err != nil {
return
return nil, err
}
if _, err = sess.In("issue_id", deleteCond).
Delete(&IssueUser{}); err != nil {
return
return nil, err
}
if _, err = sess.In("issue_id", deleteCond).
Delete(&Reaction{}); err != nil {
return
return nil, err
}
if _, err = sess.In("issue_id", deleteCond).
Delete(&IssueWatch{}); err != nil {
return
return nil, err
}
if _, err = sess.In("issue_id", deleteCond).
Delete(&Stopwatch{}); err != nil {
return
return nil, err
}
if _, err = sess.In("issue_id", deleteCond).
Delete(&TrackedTime{}); err != nil {
return
return nil, err
}
if _, err = sess.In("issue_id", deleteCond).
Delete(&project_model.ProjectIssue{}); err != nil {
return
return nil, err
}
if _, err = sess.In("dependent_issue_id", deleteCond).
Delete(&Comment{}); err != nil {
return
return nil, err
}
var attachments []*repo_model.Attachment
if err = sess.In("issue_id", deleteCond).
Find(&attachments); err != nil {
return
return nil, err
}
for j := range attachments {
@ -770,11 +770,11 @@ func DeleteIssuesByRepoID(ctx context.Context, repoID int64) (attachmentPaths []
if _, err = sess.In("issue_id", deleteCond).
Delete(&repo_model.Attachment{}); err != nil {
return
return nil, err
}
if _, err = db.DeleteByBean(ctx, &Issue{RepoID: repoID}); err != nil {
return
return nil, err
}
return attachmentPaths, err

View File

@ -136,10 +136,10 @@ func init() {
// LoadCodeComments loads CodeComments
func (r *Review) LoadCodeComments(ctx context.Context) (err error) {
if r.CodeComments != nil {
return
return err
}
if err = r.loadIssue(ctx); err != nil {
return
return err
}
r.CodeComments, err = fetchCodeCommentsByReview(ctx, r.Issue, nil, r, false)
return err
@ -147,7 +147,7 @@ func (r *Review) LoadCodeComments(ctx context.Context) (err error) {
func (r *Review) loadIssue(ctx context.Context) (err error) {
if r.Issue != nil {
return
return err
}
r.Issue, err = GetIssueByID(ctx, r.IssueID)
return err
@ -156,7 +156,7 @@ func (r *Review) loadIssue(ctx context.Context) (err error) {
// LoadReviewer loads reviewer
func (r *Review) LoadReviewer(ctx context.Context) (err error) {
if r.ReviewerID == 0 || r.Reviewer != nil {
return
return err
}
r.Reviewer, err = user_model.GetPossibleUserByID(ctx, r.ReviewerID)
return err
@ -186,7 +186,7 @@ func LoadReviewers(ctx context.Context, reviews []*Review) (err error) {
// LoadReviewerTeam loads reviewer team
func (r *Review) LoadReviewerTeam(ctx context.Context) (err error) {
if r.ReviewerTeamID == 0 || r.ReviewerTeam != nil {
return
return nil
}
r.ReviewerTeam, err = organization.GetTeamByID(ctx, r.ReviewerTeamID)
@ -196,16 +196,16 @@ func (r *Review) LoadReviewerTeam(ctx context.Context) (err error) {
// LoadAttributes loads all attributes except CodeComments
func (r *Review) LoadAttributes(ctx context.Context) (err error) {
if err = r.loadIssue(ctx); err != nil {
return
return err
}
if err = r.LoadCodeComments(ctx); err != nil {
return
return err
}
if err = r.LoadReviewer(ctx); err != nil {
return
return err
}
if err = r.LoadReviewerTeam(ctx); err != nil {
return
return err
}
return err
}
@ -1111,7 +1111,7 @@ func UpdateReviewsMigrationsByType(tp structs.GitServiceType, originalAuthorID s
_, err := db.GetEngine(db.DefaultContext).Table("review").
Where("original_author_id = ?", originalAuthorID).
And(migratedIssueCond(tp)).
Update(map[string]interface{}{
Update(map[string]any{
"reviewer_id": posterID,
"original_author": "",
"original_author_id": 0,

View File

@ -302,7 +302,7 @@ func DeleteTime(t *TrackedTime) error {
func deleteTimes(ctx context.Context, opts FindTrackedTimesOptions) (removedTime int64, err error) {
removedTime, err = GetTrackedSeconds(ctx, opts)
if err != nil || removedTime == 0 {
return
return removedTime, err
}
_, err = opts.toSession(db.GetEngine(ctx)).Table("tracked_time").Cols("deleted").Update(&TrackedTime{Deleted: true})

View File

@ -27,7 +27,7 @@ import (
// RecreateTables will recreate the tables for the provided beans using the newly provided bean definition and move all data to that new table
// WARNING: YOU MUST PROVIDE THE FULL BEAN DEFINITION
func RecreateTables(beans ...interface{}) func(*xorm.Engine) error {
func RecreateTables(beans ...any) func(*xorm.Engine) error {
return func(x *xorm.Engine) error {
sess := x.NewSession()
defer sess.Close()
@ -48,7 +48,7 @@ func RecreateTables(beans ...interface{}) func(*xorm.Engine) error {
// RecreateTable will recreate the table using the newly provided bean definition and move all data to that new table
// WARNING: YOU MUST PROVIDE THE FULL BEAN DEFINITION
// WARNING: YOU MUST COMMIT THE SESSION AT THE END
func RecreateTable(sess *xorm.Session, bean interface{}) error {
func RecreateTable(sess *xorm.Session, bean any) error {
// TODO: This will not work if there are foreign keys
tableName := sess.Engine().TableName(bean)

View File

@ -30,7 +30,7 @@ import (
// Provide models to be sync'd with the database - in particular any models you expect fixtures to be loaded from.
//
// fixtures in `models/migrations/fixtures/<TestName>` will be loaded automatically
func PrepareTestEnv(t *testing.T, skip int, syncModels ...interface{}) (*xorm.Engine, func()) {
func PrepareTestEnv(t *testing.T, skip int, syncModels ...any) (*xorm.Engine, func()) {
t.Helper()
ourSkip := 2
ourSkip += skip

View File

@ -508,6 +508,10 @@ var migrations = []Migration{
// v262 -> v263
NewMigration("Add TriggerEvent to action_run table", v1_21.AddTriggerEventToActionRun),
// v263 -> v264
NewMigration("Add git_size and lfs_size columns to repository table", v1_21.AddGitSizeAndLFSSizeToRepositoryTable),
// v264 -> v265
NewMigration("Add branch table", v1_21.AddBranchTable),
// v265 -> v266
NewMigration("Add TimeEstimate to issue table", v1_21.AddTimeEstimateColumnToIssueTable),
}

View File

@ -59,11 +59,11 @@ func UpdateMigrationServiceTypes(x *xorm.Engine) error {
}
type ExternalLoginUser struct {
ExternalID string `xorm:"pk NOT NULL"`
UserID int64 `xorm:"INDEX NOT NULL"`
LoginSourceID int64 `xorm:"pk NOT NULL"`
RawData map[string]interface{} `xorm:"TEXT JSON"`
Provider string `xorm:"index VARCHAR(25)"`
ExternalID string `xorm:"pk NOT NULL"`
UserID int64 `xorm:"INDEX NOT NULL"`
LoginSourceID int64 `xorm:"pk NOT NULL"`
RawData map[string]any `xorm:"TEXT JSON"`
Provider string `xorm:"index VARCHAR(25)"`
Email string
Name string
FirstName string

View File

@ -16,10 +16,10 @@ type Watch struct {
Mode RepoWatchMode `xorm:"SMALLINT NOT NULL DEFAULT 1"`
}
func AddModeColumnToWatch(x *xorm.Engine) (err error) {
if err = x.Sync2(new(Watch)); err != nil {
return
func AddModeColumnToWatch(x *xorm.Engine) error {
if err := x.Sync2(new(Watch)); err != nil {
return err
}
_, err = x.Exec("UPDATE `watch` SET `mode` = 1")
_, err := x.Exec("UPDATE `watch` SET `mode` = 1")
return err
}

View File

@ -23,25 +23,25 @@ func RecalculateStars(x *xorm.Engine) (err error) {
for start := 0; ; start += batchSize {
users := make([]User, 0, batchSize)
if err = sess.Limit(batchSize, start).Where("type = ?", 0).Cols("id").Find(&users); err != nil {
return
if err := sess.Limit(batchSize, start).Where("type = ?", 0).Cols("id").Find(&users); err != nil {
return err
}
if len(users) == 0 {
break
}
if err = sess.Begin(); err != nil {
return
if err := sess.Begin(); err != nil {
return err
}
for _, user := range users {
if _, err = sess.Exec("UPDATE `user` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE uid=?) WHERE id=?", user.ID, user.ID); err != nil {
return
if _, err := sess.Exec("UPDATE `user` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE uid=?) WHERE id=?", user.ID, user.ID); err != nil {
return err
}
}
if err = sess.Commit(); err != nil {
return
if err := sess.Commit(); err != nil {
return err
}
}

View File

@ -78,14 +78,14 @@ func RecalculateUserEmptyPWD(x *xorm.Engine) (err error) {
for start := 0; ; start += batchSize {
users := make([]*User, 0, batchSize)
if err = sess.Limit(batchSize, start).Where(builder.Neq{"passwd": ""}, 0).Find(&users); err != nil {
return
return err
}
if len(users) == 0 {
break
}
if err = sess.Begin(); err != nil {
return
return err
}
for _, user := range users {
@ -100,7 +100,7 @@ func RecalculateUserEmptyPWD(x *xorm.Engine) (err error) {
}
if err = sess.Commit(); err != nil {
return
return err
}
}

View File

@ -44,25 +44,25 @@ func DeleteMigrationCredentials(x *xorm.Engine) (err error) {
for start := 0; ; start += batchSize {
tasks := make([]*Task, 0, batchSize)
if err = sess.Limit(batchSize, start).Where(cond, 0).Find(&tasks); err != nil {
return
if err := sess.Limit(batchSize, start).Where(cond, 0).Find(&tasks); err != nil {
return err
}
if len(tasks) == 0 {
break
}
if err = sess.Begin(); err != nil {
return
if err := sess.Begin(); err != nil {
return err
}
for _, t := range tasks {
if t.PayloadContent, err = removeCredentials(t.PayloadContent); err != nil {
return
return err
}
if _, err = sess.ID(t.ID).Cols("payload_content").Update(t); err != nil {
return
if _, err := sess.ID(t.ID).Cols("payload_content").Update(t); err != nil {
return err
}
}
if err = sess.Commit(); err != nil {
return
if err := sess.Commit(); err != nil {
return err
}
}
return err

View File

@ -9,7 +9,7 @@ import (
"xorm.io/xorm"
)
func AddPrimaryEmail2EmailAddress(x *xorm.Engine) (err error) {
func AddPrimaryEmail2EmailAddress(x *xorm.Engine) error {
type User struct {
ID int64 `xorm:"pk autoincr"`
Email string `xorm:"NOT NULL"`
@ -26,12 +26,12 @@ func AddPrimaryEmail2EmailAddress(x *xorm.Engine) (err error) {
}
// Add lower_email and is_primary columns
if err = x.Table("email_address").Sync2(new(EmailAddress1)); err != nil {
return
if err := x.Table("email_address").Sync2(new(EmailAddress1)); err != nil {
return err
}
if _, err = x.Exec("UPDATE email_address SET lower_email=LOWER(email), is_primary=?", false); err != nil {
return
if _, err := x.Exec("UPDATE email_address SET lower_email=LOWER(email), is_primary=?", false); err != nil {
return err
}
type EmailAddress struct {
@ -44,8 +44,8 @@ func AddPrimaryEmail2EmailAddress(x *xorm.Engine) (err error) {
}
// change lower_email as unique
if err = x.Sync2(new(EmailAddress)); err != nil {
return
if err := x.Sync2(new(EmailAddress)); err != nil {
return err
}
sess := x.NewSession()
@ -55,34 +55,33 @@ func AddPrimaryEmail2EmailAddress(x *xorm.Engine) (err error) {
for start := 0; ; start += batchSize {
users := make([]*User, 0, batchSize)
if err = sess.Limit(batchSize, start).Find(&users); err != nil {
return
if err := sess.Limit(batchSize, start).Find(&users); err != nil {
return err
}
if len(users) == 0 {
break
}
for _, user := range users {
var exist bool
exist, err = sess.Where("email=?", user.Email).Table("email_address").Exist()
exist, err := sess.Where("email=?", user.Email).Table("email_address").Exist()
if err != nil {
return
return err
}
if !exist {
if _, err = sess.Insert(&EmailAddress{
if _, err := sess.Insert(&EmailAddress{
UID: user.ID,
Email: user.Email,
LowerEmail: strings.ToLower(user.Email),
IsActivated: user.IsActive,
IsPrimary: true,
}); err != nil {
return
return err
}
} else {
if _, err = sess.Where("email=?", user.Email).Cols("is_primary").Update(&EmailAddress{
if _, err := sess.Where("email=?", user.Email).Cols("is_primary").Update(&EmailAddress{
IsPrimary: true,
}); err != nil {
return
return err
}
}
}

View File

@ -14,7 +14,7 @@ import (
)
func UnwrapLDAPSourceCfg(x *xorm.Engine) error {
jsonUnmarshalHandleDoubleEncode := func(bs []byte, v interface{}) error {
jsonUnmarshalHandleDoubleEncode := func(bs []byte, v any) error {
err := json.Unmarshal(bs, v)
if err != nil {
ok := true
@ -54,7 +54,7 @@ func UnwrapLDAPSourceCfg(x *xorm.Engine) error {
const dldapType = 5
type WrappedSource struct {
Source map[string]interface{}
Source map[string]any
}
// change lower_email as unique
@ -77,7 +77,7 @@ func UnwrapLDAPSourceCfg(x *xorm.Engine) error {
for _, source := range sources {
wrapped := &WrappedSource{
Source: map[string]interface{}{},
Source: map[string]any{},
}
err := jsonUnmarshalHandleDoubleEncode([]byte(source.Cfg), &wrapped)
if err != nil {

View File

@ -62,8 +62,8 @@ func Test_UnwrapLDAPSourceCfg(t *testing.T) {
}
for _, source := range sources {
converted := map[string]interface{}{}
expected := map[string]interface{}{}
converted := map[string]any{}
expected := map[string]any{}
if err := json.Unmarshal([]byte(source.Cfg), &converted); err != nil {
assert.NoError(t, err)

View File

@ -79,7 +79,7 @@ func Test_AddHeaderAuthorizationEncryptedColWebhook(t *testing.T) {
return
}
for _, h := range hookTasks {
var m map[string]interface{}
var m map[string]any
err := json.Unmarshal([]byte(h.PayloadContent), &m)
assert.NoError(t, err)
assert.Nil(t, m["access_token"])

View File

@ -4,13 +4,38 @@
package v1_21 //nolint
import (
"fmt"
"xorm.io/xorm"
)
func AddTimeEstimateColumnToIssueTable(x *xorm.Engine) error {
type Issue struct {
TimeEstimate int64 `xorm:"NOT NULL DEFAULT 0"`
// AddGitSizeAndLFSSizeToRepositoryTable: add GitSize and LFSSize columns to Repository
func AddGitSizeAndLFSSizeToRepositoryTable(x *xorm.Engine) error {
type Repository struct {
GitSize int64 `xorm:"NOT NULL DEFAULT 0"`
LFSSize int64 `xorm:"NOT NULL DEFAULT 0"`
}
return x.Sync(new(Issue))
sess := x.NewSession()
defer sess.Close()
if err := sess.Begin(); err != nil {
return err
}
if err := sess.Sync2(new(Repository)); err != nil {
return fmt.Errorf("Sync2: %w", err)
}
_, err := sess.Exec(`UPDATE repository SET lfs_size=(SELECT SUM(size) FROM lfs_meta_object WHERE lfs_meta_object.repository_id=repository.ID) WHERE EXISTS (SELECT 1 FROM lfs_meta_object WHERE lfs_meta_object.repository_id=repository.ID)`)
if err != nil {
return err
}
_, err = sess.Exec(`UPDATE repository SET git_size = size - lfs_size`)
if err != nil {
return err
}
return sess.Commit()
}

View File

@ -0,0 +1,93 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_21 //nolint
import (
"context"
"fmt"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/xorm"
)
func AddBranchTable(x *xorm.Engine) error {
type Branch struct {
ID int64
RepoID int64 `xorm:"UNIQUE(s)"`
Name string `xorm:"UNIQUE(s) NOT NULL"`
CommitID string
CommitMessage string `xorm:"TEXT"`
PusherID int64
IsDeleted bool `xorm:"index"`
DeletedByID int64
DeletedUnix timeutil.TimeStamp `xorm:"index"`
CommitTime timeutil.TimeStamp // The commit
CreatedUnix timeutil.TimeStamp `xorm:"created"`
UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
}
if err := x.Sync(new(Branch)); err != nil {
return err
}
if exist, err := x.IsTableExist("deleted_branches"); err != nil {
return err
} else if !exist {
return nil
}
type DeletedBranch struct {
ID int64
RepoID int64 `xorm:"index UNIQUE(s)"`
Name string `xorm:"UNIQUE(s) NOT NULL"`
Commit string
DeletedByID int64
DeletedUnix timeutil.TimeStamp
}
var adminUserID int64
has, err := x.Table("user").
Select("id").
Where("is_admin=?", true).
Asc("id"). // Reliably get the admin with the lowest ID.
Get(&adminUserID)
if err != nil {
return err
} else if !has {
return fmt.Errorf("no admin user found")
}
branches := make([]Branch, 0, 100)
if err := db.Iterate(context.Background(), nil, func(ctx context.Context, deletedBranch *DeletedBranch) error {
branches = append(branches, Branch{
RepoID: deletedBranch.RepoID,
Name: deletedBranch.Name,
CommitID: deletedBranch.Commit,
PusherID: adminUserID,
IsDeleted: true,
DeletedByID: deletedBranch.DeletedByID,
DeletedUnix: deletedBranch.DeletedUnix,
})
if len(branches) >= 100 {
_, err := x.Insert(&branches)
if err != nil {
return err
}
branches = branches[:0]
}
return nil
}); err != nil {
return err
}
if len(branches) > 0 {
if _, err := x.Insert(&branches); err != nil {
return err
}
}
return x.DropTables("deleted_branches")
}

View File

@ -0,0 +1,16 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_21 //nolint
import (
"xorm.io/xorm"
)
func AddTimeEstimateColumnToIssueTable(x *xorm.Engine) error {
type Issue struct {
TimeEstimate int64 `xorm:"NOT NULL DEFAULT 0"`
}
return x.Sync(new(Issue))
}

View File

@ -81,11 +81,11 @@ func AddIssueDependencies(x *xorm.Engine) (err error) {
// RepoUnit describes all units of a repository
type RepoUnit struct {
ID int64
RepoID int64 `xorm:"INDEX(s)"`
Type int `xorm:"INDEX(s)"`
Config map[string]interface{} `xorm:"JSON"`
CreatedUnix int64 `xorm:"INDEX CREATED"`
Created time.Time `xorm:"-"`
RepoID int64 `xorm:"INDEX(s)"`
Type int `xorm:"INDEX(s)"`
Config map[string]any `xorm:"JSON"`
CreatedUnix int64 `xorm:"INDEX CREATED"`
Created time.Time `xorm:"-"`
}
// Updating existing issue units
@ -96,7 +96,7 @@ func AddIssueDependencies(x *xorm.Engine) (err error) {
}
for _, unit := range units {
if unit.Config == nil {
unit.Config = make(map[string]interface{})
unit.Config = make(map[string]any)
}
if _, ok := unit.Config["EnableDependencies"]; !ok {
unit.Config["EnableDependencies"] = setting.Service.DefaultEnableDependencies

View File

@ -15,10 +15,10 @@ func AddPullRequestRebaseWithMerge(x *xorm.Engine) error {
// RepoUnit describes all units of a repository
type RepoUnit struct {
ID int64
RepoID int64 `xorm:"INDEX(s)"`
Type int `xorm:"INDEX(s)"`
Config map[string]interface{} `xorm:"JSON"`
CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
RepoID int64 `xorm:"INDEX(s)"`
Type int `xorm:"INDEX(s)"`
Config map[string]any `xorm:"JSON"`
CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
}
const (
@ -46,7 +46,7 @@ func AddPullRequestRebaseWithMerge(x *xorm.Engine) error {
}
for _, unit := range units {
if unit.Config == nil {
unit.Config = make(map[string]interface{})
unit.Config = make(map[string]any)
}
// Allow the new merge style if all other merge styles are allowed
allowMergeRebase := true

View File

@ -532,27 +532,6 @@ func GetOrgsCanCreateRepoByUserID(userID int64) ([]*Organization, error) {
Find(&orgs)
}
// GetOrgUsersByUserID returns all organization-user relations by user ID.
func GetOrgUsersByUserID(uid int64, opts *SearchOrganizationsOptions) ([]*OrgUser, error) {
ous := make([]*OrgUser, 0, 10)
sess := db.GetEngine(db.DefaultContext).
Join("LEFT", "`user`", "`org_user`.org_id=`user`.id").
Where("`org_user`.uid=?", uid)
if !opts.All {
// Only show public organizations
sess.And("is_public=?", true)
}
if opts.PageSize != 0 {
sess = db.SetSessionPagination(sess, opts)
}
err := sess.
Asc("`user`.name").
Find(&ous)
return ous, err
}
// GetOrgUsersByOrgID returns all organization-user relations by organization ID.
func GetOrgUsersByOrgID(ctx context.Context, opts *FindOrgMembersOpts) ([]*OrgUser, error) {
sess := db.GetEngine(ctx).Where("org_id=?", opts.OrgID)

View File

@ -207,42 +207,6 @@ func TestFindOrgs(t *testing.T) {
assert.EqualValues(t, 1, total)
}
func TestGetOrgUsersByUserID(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
orgUsers, err := organization.GetOrgUsersByUserID(5, &organization.SearchOrganizationsOptions{All: true})
assert.NoError(t, err)
if assert.Len(t, orgUsers, 3) {
assert.Equal(t, organization.OrgUser{
ID: orgUsers[0].ID,
OrgID: 23,
UID: 5,
IsPublic: false,
}, *orgUsers[0])
assert.Equal(t, organization.OrgUser{
ID: orgUsers[1].ID,
OrgID: 6,
UID: 5,
IsPublic: true,
}, *orgUsers[1])
assert.Equal(t, organization.OrgUser{
ID: orgUsers[2].ID,
OrgID: 7,
UID: 5,
IsPublic: false,
}, *orgUsers[2])
}
publicOrgUsers, err := organization.GetOrgUsersByUserID(5, &organization.SearchOrganizationsOptions{All: false})
assert.NoError(t, err)
assert.Len(t, publicOrgUsers, 1)
assert.Equal(t, *orgUsers[1], *publicOrgUsers[0])
orgUsers, err = organization.GetOrgUsersByUserID(1, &organization.SearchOrganizationsOptions{All: true})
assert.NoError(t, err)
assert.Len(t, orgUsers, 0)
}
func TestGetOrgUsersByOrgID(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())

View File

@ -59,7 +59,7 @@ type PackageDescriptor struct {
Creator *user_model.User
PackageProperties PackagePropertyList
VersionProperties PackagePropertyList
Metadata interface{}
Metadata any
Files []*PackageFileDescriptor
}
@ -136,7 +136,7 @@ func GetPackageDescriptor(ctx context.Context, pv *PackageVersion) (*PackageDesc
return nil, err
}
var metadata interface{}
var metadata any
switch p.Type {
case TypeAlpine:
metadata = &alpine.VersionMetadata{}

View File

@ -5,11 +5,18 @@ package packages
import (
"context"
"strconv"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/perm"
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
)
// ErrPackageBlobNotExist indicates a package blob not exist error
@ -98,3 +105,42 @@ func GetTotalUnreferencedBlobSize(ctx context.Context) (int64, error) {
Where("package_file.id IS NULL").
SumInt(&PackageBlob{}, "size")
}
// IsBlobAccessibleForUser tests if the user has access to the blob
func IsBlobAccessibleForUser(ctx context.Context, blobID int64, user *user_model.User) (bool, error) {
if user.IsAdmin {
return true, nil
}
maxTeamAuthorize := builder.
Select("max(team.authorize)").
From("team").
InnerJoin("team_user", "team_user.team_id = team.id").
Where(builder.Eq{"team_user.uid": user.ID}.And(builder.Expr("team_user.org_id = `user`.id")))
maxTeamUnitAccessMode := builder.
Select("max(team_unit.access_mode)").
From("team").
InnerJoin("team_user", "team_user.team_id = team.id").
InnerJoin("team_unit", "team_unit.team_id = team.id").
Where(builder.Eq{"team_user.uid": user.ID, "team_unit.type": unit.TypePackages}.And(builder.Expr("team_user.org_id = `user`.id")))
cond := builder.Eq{"package_blob.id": blobID}.And(
// owner = user
builder.Eq{"`user`.id": user.ID}.
// user can see owner
Or(builder.Eq{"`user`.visibility": structs.VisibleTypePublic}.Or(builder.Eq{"`user`.visibility": structs.VisibleTypeLimited})).
// owner is an organization and user has access to it
Or(builder.Eq{"`user`.type": user_model.UserTypeOrganization}.
And(builder.Lte{strconv.Itoa(int(perm.AccessModeRead)): maxTeamAuthorize}.Or(builder.Lte{strconv.Itoa(int(perm.AccessModeRead)): maxTeamUnitAccessMode}))),
)
return db.GetEngine(ctx).
Table("package_blob").
Join("INNER", "package_file", "package_file.blob_id = package_blob.id").
Join("INNER", "package_version", "package_version.id = package_file.version_id").
Join("INNER", "package", "package.id = package_version.package_id").
Join("INNER", "user", "`user`.id = package.owner_id").
Where(cond).
Exist(&PackageBlob{})
}
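
A hedged sketch of how a caller might use the new helper to guard a blob download; everything except IsBlobAccessibleForUser itself is hypothetical, and packages_model / user_model stand for the usual Gitea import aliases:

// canServeBlob reports whether doer may download the given package blob.
// Sketch only: the caller is assumed to pass a non-nil doer, since
// IsBlobAccessibleForUser dereferences user.IsAdmin unconditionally.
func canServeBlob(ctx context.Context, blobID int64, doer *user_model.User) (bool, error) {
	if doer == nil {
		return false, nil
	}
	return packages_model.IsBlobAccessibleForUser(ctx, blobID, doer)
}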

View File

@ -127,7 +127,8 @@ func (p *Permission) LogString() string {
}
// GetUserRepoPermission returns the user permissions to the repository
func GetUserRepoPermission(ctx context.Context, repo *repo_model.Repository, user *user_model.User) (perm Permission, err error) {
func GetUserRepoPermission(ctx context.Context, repo *repo_model.Repository, user *user_model.User) (Permission, error) {
var perm Permission
if log.IsTrace() {
defer func() {
if user == nil {
@ -147,30 +148,31 @@ func GetUserRepoPermission(ctx context.Context, repo *repo_model.Repository, use
// TODO: anonymous user visit public unit of private repo???
if user == nil && repo.IsPrivate {
perm.AccessMode = perm_model.AccessModeNone
return
return perm, nil
}
var is bool
var isCollaborator bool
var err error
if user != nil {
is, err = repo_model.IsCollaborator(ctx, repo.ID, user.ID)
isCollaborator, err = repo_model.IsCollaborator(ctx, repo.ID, user.ID)
if err != nil {
return perm, err
}
}
if err = repo.LoadOwner(ctx); err != nil {
return
if err := repo.LoadOwner(ctx); err != nil {
return perm, err
}
// Prevent strangers from checking out public repo of private organization/users
// Allow user if they are collaborator of a repo within a private user or a private organization but not a member of the organization itself
if !organization.HasOrgOrUserVisible(ctx, repo.Owner, user) && !is {
if !organization.HasOrgOrUserVisible(ctx, repo.Owner, user) && !isCollaborator {
perm.AccessMode = perm_model.AccessModeNone
return
return perm, nil
}
if err = repo.LoadUnits(ctx); err != nil {
return
if err := repo.LoadUnits(ctx); err != nil {
return perm, err
}
perm.Units = repo.Units
@ -178,32 +180,32 @@ func GetUserRepoPermission(ctx context.Context, repo *repo_model.Repository, use
// anonymous visit public repo
if user == nil {
perm.AccessMode = perm_model.AccessModeRead
return
return perm, nil
}
// Admin or the owner has super access to the repository
if user.IsAdmin || user.ID == repo.OwnerID {
perm.AccessMode = perm_model.AccessModeOwner
return
return perm, nil
}
// plain user
perm.AccessMode, err = accessLevel(ctx, user, repo)
if err != nil {
return
return perm, err
}
if err = repo.LoadOwner(ctx); err != nil {
return
if err := repo.LoadOwner(ctx); err != nil {
return perm, err
}
if !repo.Owner.IsOrganization() {
return
return perm, nil
}
perm.UnitsMode = make(map[unit.Type]perm_model.AccessMode)
// Collaborators on organization
if is {
if isCollaborator {
for _, u := range repo.Units {
perm.UnitsMode[u.Type] = perm.AccessMode
}
@ -212,7 +214,7 @@ func GetUserRepoPermission(ctx context.Context, repo *repo_model.Repository, use
// get units mode from teams
teams, err := organization.GetUserRepoTeams(ctx, repo.OwnerID, user.ID, repo.ID)
if err != nil {
return
return perm, err
}
// if user in an owner team
@ -220,7 +222,7 @@ func GetUserRepoPermission(ctx context.Context, repo *repo_model.Repository, use
if team.AccessMode >= perm_model.AccessModeAdmin {
perm.AccessMode = perm_model.AccessModeOwner
perm.UnitsMode = nil
return
return perm, nil
}
}
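
The refactor above replaces the named (perm, err) return values with explicit return statements, so every exit point spells out exactly what it returns and a shadowed err can no longer turn into a silent nil error. A tiny illustration of that hazard with named returns (the function and its input are made up):

import "strconv"

// lookup returns (0, nil) even when Atoi fails: the err declared inside the
// if statement shadows the named return value, so the failure is never reported.
func lookup() (result int, err error) {
	if v, err := strconv.Atoi("not a number"); err == nil {
		result = v
	}
	return
}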

View File

@ -147,7 +147,7 @@ func DeleteRepository(doer *user_model.User, uid, repoID int64) error {
&repo_model.Collaboration{RepoID: repoID},
&issues_model.Comment{RefRepoID: repoID},
&git_model.CommitStatus{RepoID: repoID},
&git_model.DeletedBranch{RepoID: repoID},
&git_model.Branch{RepoID: repoID},
&git_model.LFSLock{RepoID: repoID},
&repo_model.LanguageStat{RepoID: repoID},
&issues_model.Milestone{RepoID: repoID},
@ -456,7 +456,7 @@ func repoStatsCorrectNumClosedPulls(ctx context.Context, id int64) error {
return repo_model.UpdateRepoIssueNumbers(ctx, id, true, true)
}
func statsQuery(args ...interface{}) func(context.Context) ([]map[string][]byte, error) {
func statsQuery(args ...any) func(context.Context) ([]map[string][]byte, error) {
return func(ctx context.Context) ([]map[string][]byte, error) {
return db.GetEngine(ctx).Query(args...)
}
@ -628,14 +628,14 @@ func DoctorUserStarNum() (err error) {
for start := 0; ; start += batchSize {
users := make([]user_model.User, 0, batchSize)
if err = db.GetEngine(db.DefaultContext).Limit(batchSize, start).Where("type = ?", 0).Cols("id").Find(&users); err != nil {
return
return err
}
if len(users) == 0 {
break
}
if err = updateUserStarNumbers(users); err != nil {
return
return err
}
}

View File

@ -33,6 +33,19 @@ func TestRepository_GetCollaborators(t *testing.T) {
test(2)
test(3)
test(4)
// Test db.ListOptions
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 22})
collaborators1, err := repo_model.GetCollaborators(db.DefaultContext, repo.ID, db.ListOptions{PageSize: 1, Page: 1})
assert.NoError(t, err)
assert.Len(t, collaborators1, 1)
collaborators2, err := repo_model.GetCollaborators(db.DefaultContext, repo.ID, db.ListOptions{PageSize: 1, Page: 2})
assert.NoError(t, err)
assert.Len(t, collaborators2, 1)
assert.NotEqualValues(t, collaborators1[0].ID, collaborators2[0].ID)
}
func TestRepository_IsCollaborator(t *testing.T) {
@ -66,5 +79,80 @@ func TestRepository_ChangeCollaborationAccessMode(t *testing.T) {
assert.NoError(t, repo_model.ChangeCollaborationAccessMode(db.DefaultContext, repo, unittest.NonexistentID, perm.AccessModeAdmin))
// Discard invalid input.
assert.NoError(t, repo_model.ChangeCollaborationAccessMode(db.DefaultContext, repo, 4, perm.AccessMode(unittest.NonexistentID)))
unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: repo.ID})
}
func TestRepository_CountCollaborators(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
count, err := repo_model.CountCollaborators(repo1.ID)
assert.NoError(t, err)
assert.EqualValues(t, 2, count)
repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 22})
count, err = repo_model.CountCollaborators(repo2.ID)
assert.NoError(t, err)
assert.EqualValues(t, 2, count)
// Non-existent repository.
count, err = repo_model.CountCollaborators(unittest.NonexistentID)
assert.NoError(t, err)
assert.EqualValues(t, 0, count)
}
func TestRepository_IsOwnerMemberCollaborator(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
// Organisation owner.
actual, err := repo_model.IsOwnerMemberCollaborator(repo1, 2)
assert.NoError(t, err)
assert.True(t, actual)
// Team member.
actual, err = repo_model.IsOwnerMemberCollaborator(repo1, 4)
assert.NoError(t, err)
assert.True(t, actual)
// Normal user.
actual, err = repo_model.IsOwnerMemberCollaborator(repo1, 1)
assert.NoError(t, err)
assert.False(t, actual)
repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
// Collaborator.
actual, err = repo_model.IsOwnerMemberCollaborator(repo2, 4)
assert.NoError(t, err)
assert.True(t, actual)
repo3 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 15})
// Repository owner.
actual, err = repo_model.IsOwnerMemberCollaborator(repo3, 2)
assert.NoError(t, err)
assert.True(t, actual)
}
func TestRepo_GetCollaboration(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
// Existing collaboration.
collab, err := repo_model.GetCollaboration(db.DefaultContext, repo.ID, 4)
assert.NoError(t, err)
assert.NotNil(t, collab)
assert.EqualValues(t, 4, collab.UserID)
assert.EqualValues(t, 4, collab.RepoID)
// Non-existing collaboration.
collab, err = repo_model.GetCollaboration(db.DefaultContext, repo.ID, 1)
assert.NoError(t, err)
assert.Nil(t, collab)
}

View File

@ -105,7 +105,7 @@ func DeleteMirrorByRepoID(repoID int64) error {
}
// MirrorsIterate iterates all mirror repositories.
func MirrorsIterate(limit int, f func(idx int, bean interface{}) error) error {
func MirrorsIterate(limit int, f func(idx int, bean any) error) error {
sess := db.GetEngine(db.DefaultContext).
Where("next_update_unix<=?", time.Now().Unix()).
And("next_update_unix!=0").

View File

@ -127,7 +127,7 @@ func GetPushMirrorsSyncedOnCommit(ctx context.Context, repoID int64) ([]*PushMir
}
// PushMirrorsIterate iterates all push-mirror repositories.
func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean interface{}) error) error {
func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean any) error) error {
sess := db.GetEngine(ctx).
Where("last_update + (`interval` / ?) <= ?", time.Second, time.Now().Unix()).
And("`interval` != 0").

View File

@ -41,7 +41,7 @@ func TestPushMirrorsIterate(t *testing.T) {
time.Sleep(1 * time.Millisecond)
repo_model.PushMirrorsIterate(db.DefaultContext, 1, func(idx int, bean interface{}) error {
repo_model.PushMirrorsIterate(db.DefaultContext, 1, func(idx int, bean any) error {
m, ok := bean.(*repo_model.PushMirror)
assert.True(t, ok)
assert.Equal(t, "test-1", m.RemoteName)

View File

@ -339,7 +339,7 @@ func (s releaseMetaSearch) Less(i, j int) bool {
// GetReleaseAttachments retrieves the attachments for releases
func GetReleaseAttachments(ctx context.Context, rels ...*Release) (err error) {
if len(rels) == 0 {
return
return nil
}
// To keep this efficient as possible sort all releases by id,
@ -442,7 +442,7 @@ func UpdateReleasesMigrationsByType(gitServiceType structs.GitServiceType, origi
_, err := db.GetEngine(db.DefaultContext).Table("release").
Where("repo_id IN (SELECT id FROM repository WHERE original_service_type = ?)", gitServiceType).
And("original_author_id = ?", originalAuthorID).
Update(map[string]interface{}{
Update(map[string]any{
"publisher_id": posterID,
"original_author": "",
"original_author_id": 0,

View File

@ -16,6 +16,7 @@ import (
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markup"
"code.gitea.io/gitea/modules/setting"
@ -163,6 +164,8 @@ type Repository struct {
IsTemplate bool `xorm:"INDEX NOT NULL DEFAULT false"`
TemplateID int64 `xorm:"INDEX"`
Size int64 `xorm:"NOT NULL DEFAULT 0"`
GitSize int64 `xorm:"NOT NULL DEFAULT 0"`
LFSSize int64 `xorm:"NOT NULL DEFAULT 0"`
CodeIndexerStatus *RepoIndexerStatus `xorm:"-"`
StatsIndexerStatus *RepoIndexerStatus `xorm:"-"`
IsFsckEnabled bool `xorm:"NOT NULL DEFAULT true"`
@ -196,6 +199,42 @@ func (repo *Repository) SanitizedOriginalURL() string {
return u.String()
}
// Text representations to be returned in SizeDetail.Name
const (
SizeDetailNameGit = "git"
SizeDetailNameLFS = "lfs"
)
type SizeDetail struct {
Name string
Size int64
}
// SizeDetails returns the list of size details (currently git and LFS) for the repository
func (repo *Repository) SizeDetails() []SizeDetail {
sizeDetails := []SizeDetail{
{
Name: SizeDetailNameGit,
Size: repo.GitSize,
},
{
Name: SizeDetailNameLFS,
Size: repo.LFSSize,
},
}
return sizeDetails
}
// SizeDetailsString returns a concatenation of all repository size details as a string
func (repo *Repository) SizeDetailsString() string {
var str strings.Builder
sizeDetails := repo.SizeDetails()
for _, detail := range sizeDetails {
str.WriteString(fmt.Sprintf("%s: %s, ", detail.Name, base.FileSize(detail.Size)))
}
return strings.TrimSuffix(str.String(), ", ")
}
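
For illustration, a sketch of what SizeDetailsString yields; the exact humanised figures come from base.FileSize, so the numbers below are approximate, and real code loads repositories from the database rather than constructing them in place:

func ExampleRepository_SizeDetailsString() {
	// Sketch only: fields set directly for demonstration.
	repo := &repo_model.Repository{GitSize: 4 * 1024 * 1024, LFSSize: 512 * 1024}
	fmt.Println(repo.SizeDetailsString())
	// Approximate output: git: 4.0 MiB, lfs: 512 KiB
}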
func (repo *Repository) LogString() string {
if repo == nil {
return "<Repository nil>"
@ -489,6 +528,18 @@ func (repo *Repository) ComposeCompareURL(oldCommitID, newCommitID string) strin
return fmt.Sprintf("%s/%s/compare/%s...%s", url.PathEscape(repo.OwnerName), url.PathEscape(repo.Name), util.PathEscapeSegments(oldCommitID), util.PathEscapeSegments(newCommitID))
}
func (repo *Repository) ComposeBranchCompareURL(baseRepo *Repository, branchName string) string {
if baseRepo == nil {
baseRepo = repo
}
var cmpBranchEscaped string
if repo.ID != baseRepo.ID {
cmpBranchEscaped = fmt.Sprintf("%s/%s:", url.PathEscape(repo.OwnerName), url.PathEscape(repo.Name))
}
cmpBranchEscaped = fmt.Sprintf("%s%s", cmpBranchEscaped, util.PathEscapeSegments(branchName))
return fmt.Sprintf("%s/compare/%s...%s", baseRepo.Link(), util.PathEscapeSegments(baseRepo.DefaultBranch), cmpBranchEscaped)
}
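
To make the URL shape concrete, a hedged example of ComposeBranchCompareURL; the repository names and branches below are made up:

// base repository:   org/base, DefaultBranch "main"
// forked repository: user/fork, branch "feature"
//
// fork.ComposeBranchCompareURL(base, "feature")
//   -> base.Link() + "/compare/main...user/fork:feature"
// base.ComposeBranchCompareURL(nil, "feature")   // nil base falls back to the repo itself
//   -> base.Link() + "/compare/main...feature"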
// IsOwnedBy returns true when user owns this repository
func (repo *Repository) IsOwnedBy(userID int64) bool {
return repo.OwnerID == userID

Some files were not shown because too many files have changed in this diff.