Merge branch 'main' into fix/31709

techknowlogick 2024-09-07 18:06:31 -04:00, committed by GitHub
commit 28ec3e838c
287 changed files with 10305 additions and 3821 deletions


@ -224,7 +224,6 @@ rules:
"@typescript-eslint/await-thenable": [2]
"@typescript-eslint/ban-ts-comment": [2, {'ts-expect-error': false, 'ts-ignore': true, 'ts-nocheck': false, 'ts-check': false}]
"@typescript-eslint/ban-tslint-comment": [0]
"@typescript-eslint/ban-types": [2, {extendDefaults: true, types: {Function: false}}]
"@typescript-eslint/class-literal-property-style": [0]
"@typescript-eslint/class-methods-use-this": [0]
"@typescript-eslint/consistent-generic-constructors": [0]
@ -255,6 +254,7 @@ rules:
"@typescript-eslint/no-dynamic-delete": [0]
"@typescript-eslint/no-empty-function": [0]
"@typescript-eslint/no-empty-interface": [0]
"@typescript-eslint/no-empty-object-type": [2]
"@typescript-eslint/no-explicit-any": [0]
"@typescript-eslint/no-extra-non-null-assertion": [2]
"@typescript-eslint/no-extraneous-class": [0]
@ -266,7 +266,7 @@ rules:
"@typescript-eslint/no-invalid-this": [0]
"@typescript-eslint/no-invalid-void-type": [0]
"@typescript-eslint/no-loop-func": [0]
"@typescript-eslint/no-loss-of-precision": [2]
"@typescript-eslint/no-loss-of-precision": [0]
"@typescript-eslint/no-magic-numbers": [0]
"@typescript-eslint/no-meaningless-void-operator": [0]
"@typescript-eslint/no-misused-new": [2]
@ -278,8 +278,9 @@ rules:
"@typescript-eslint/no-non-null-assertion": [0]
"@typescript-eslint/no-redeclare": [0]
"@typescript-eslint/no-redundant-type-constituents": [2]
"@typescript-eslint/no-require-imports": [0]
"@typescript-eslint/no-require-imports": [2]
"@typescript-eslint/no-restricted-imports": [0]
"@typescript-eslint/no-restricted-types": [0]
"@typescript-eslint/no-shadow": [0]
"@typescript-eslint/no-this-alias": [0] # handled by unicorn/no-this-assignment
"@typescript-eslint/no-unnecessary-boolean-literal-compare": [0]
@ -294,6 +295,7 @@ rules:
"@typescript-eslint/no-unsafe-call": [0]
"@typescript-eslint/no-unsafe-declaration-merging": [2]
"@typescript-eslint/no-unsafe-enum-comparison": [2]
"@typescript-eslint/no-unsafe-function-type": [2]
"@typescript-eslint/no-unsafe-member-access": [0]
"@typescript-eslint/no-unsafe-return": [0]
"@typescript-eslint/no-unsafe-unary-minus": [2]
@ -302,7 +304,7 @@ rules:
"@typescript-eslint/no-use-before-define": [0]
"@typescript-eslint/no-useless-constructor": [0]
"@typescript-eslint/no-useless-empty-export": [0]
"@typescript-eslint/no-var-requires": [2]
"@typescript-eslint/no-wrapper-object-types": [2]
"@typescript-eslint/non-nullable-type-assertion-style": [0]
"@typescript-eslint/only-throw-error": [2]
"@typescript-eslint/parameter-properties": [0]
@ -334,7 +336,7 @@ rules:
"@typescript-eslint/switch-exhaustiveness-check": [0]
"@typescript-eslint/triple-slash-reference": [2]
"@typescript-eslint/typedef": [0]
"@typescript-eslint/unbound-method": [2]
"@typescript-eslint/unbound-method": [0] # too many false-positives
"@typescript-eslint/unified-signatures": [2]
accessor-pairs: [2]
array-callback-return: [2, {checkForEach: true}]
@ -599,7 +601,7 @@ rules:
no-lone-blocks: [2]
no-lonely-if: [0]
no-loop-func: [0]
no-loss-of-precision: [0] # handled by @typescript-eslint/no-loss-of-precision
no-loss-of-precision: [2]
no-magic-numbers: [0]
no-misleading-character-class: [2]
no-multi-assign: [0]
@ -843,6 +845,7 @@ rules:
unicorn/no-invalid-fetch-options: [2]
unicorn/no-invalid-remove-event-listener: [2]
unicorn/no-keyword-prefix: [0]
unicorn/no-length-as-slice-end: [2]
unicorn/no-lonely-if: [2]
unicorn/no-magic-array-flat-depth: [0]
unicorn/no-negated-condition: [0]


@ -11,7 +11,7 @@ body:
value: |
1. Please speak English; this is the language all maintainers can speak and write.
2. Please ask questions or discuss configuration/deployment problems on our Discord
server (https://discord.gg/gitea) or forum (https://discourse.gitea.io).
server (https://discord.gg/gitea) or forum (https://forum.gitea.com).
3. Make sure you are using the latest release and
take a moment to check that your issue hasn't been reported before.
4. Make sure it's not mentioned in the FAQ (https://docs.gitea.com/help/faq)


@ -7,7 +7,7 @@ contact_links:
url: https://discord.gg/Gitea
about: Please ask questions and discuss configuration or deployment problems here.
- name: Discourse Forum
url: https://discourse.gitea.io
url: https://forum.gitea.com
about: Questions and configuration or deployment problems can also be discussed on our forum.
- name: Frequently Asked Questions
url: https://docs.gitea.com/help/faq


@ -7,7 +7,7 @@ body:
value: |
1. Please speak English; this is the language all maintainers can speak and write.
2. Please ask questions or discuss configuration/deployment problems on our Discord
server (https://discord.gg/gitea) or forum (https://discourse.gitea.io).
server (https://discord.gg/gitea) or forum (https://forum.gitea.com).
3. Please take a moment to check that your feature hasn't already been suggested.
- type: textarea
id: description


@ -11,7 +11,7 @@ body:
value: |
1. Please speak English; this is the language all maintainers can speak and write.
2. Please ask questions or discuss configuration/deployment problems on our Discord
server (https://discord.gg/gitea) or forum (https://discourse.gitea.io).
server (https://discord.gg/gitea) or forum (https://forum.gitea.com).
3. Please take a moment to check that your issue doesn't already exist.
4. Make sure it's not mentioned in the FAQ (https://docs.gitea.com/help/faq)
5. Please give all relevant information below for bug reports, because

.github/labeler.yml

@ -75,3 +75,8 @@ modifies/js:
- any-glob-to-any-file:
- "**/*.js"
- "**/*.vue"
docs-update-needed:
- changed-files:
- any-glob-to-any-file:
- "custom/conf/app.example.ini"


@ -137,7 +137,7 @@ TAGS ?=
TAGS_SPLIT := $(subst $(COMMA), ,$(TAGS))
TAGS_EVIDENCE := $(MAKE_EVIDENCE_DIR)/tags
TEST_TAGS ?= sqlite sqlite_unlock_notify
TEST_TAGS ?= $(TAGS_SPLIT) sqlite sqlite_unlock_notify
TAR_EXCLUDES := .git data indexers queues log node_modules $(EXECUTABLE) $(FOMANTIC_WORK_DIR)/node_modules $(DIST) $(MAKE_EVIDENCE_DIR) $(AIR_TMP_DIR) $(GO_LICENSE_TMP_DIR)

File diff suppressed because one or more lines are too long


@ -290,8 +290,22 @@ Gitea or set your environment appropriately.`, "")
return nil
}
// runHookUpdate avoids doing heavy operations in the update hook, because unlike
// pre-receive and post-receive it is invoked separately for every single ref update
func runHookUpdate(c *cli.Context) error {
if isInternal, _ := strconv.ParseBool(os.Getenv(repo_module.EnvIsInternal)); isInternal {
return nil
}
// Update is empty and is kept only for backwards compatibility
if len(os.Args) < 3 {
return nil
}
refName := git.RefName(os.Args[len(os.Args)-3])
if refName.IsPull() {
// ignore update to refs/pull/xxx/head, so we don't need to output any information
os.Exit(1)
}
return nil
}
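For reference, git invokes the update hook once per ref as `update <ref-name> <old-oid> <new-oid>`, which is why the ref name is taken three positions from the end of os.Args. A minimal standalone sketch of that indexing and the refs/pull check (the prefix test stands in for git.RefName.IsPull; not Gitea code):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Simulated hook invocation: the hook name, then ref name, old OID, new OID.
	args := []string{"update", "refs/pull/7/head", "0000000000000000000000000000000000000000", "9fceb02d0ae598e95dc970b74767f19372d61af8"}

	refName := args[len(args)-3] // same indexing as runHookUpdate above
	fmt.Println(refName, strings.HasPrefix(refName, "refs/pull/")) // refs/pull/7/head true
}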


@ -2555,7 +2555,8 @@ LEVEL = Info
;LIMIT_SIZE_SWIFT = -1
;; Maximum size of a Vagrant upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
;LIMIT_SIZE_VAGRANT = -1
;; Enable RPM re-signing by default. (It will overwrite the old signature, using the v4 format, which is not compatible with CentOS 6 or older)
;DEFAULT_RPM_SIGN_ENABLED = false
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; default storage for attachments, lfs and avatars
@ -2684,6 +2685,14 @@ LEVEL = Info
;;
;; Default platform to get action plugins, `github` for `https://github.com`, `self` for the current Gitea instance.
;DEFAULT_ACTIONS_URL = github
;; Log retention time in days. Old logs will be deleted after this period.
;LOG_RETENTION_DAYS = 365
;; Log compression type, `none` for no compression, `zstd` for zstd compression.
;; Other compression types like `gzip` are NOT supported, since a seekable stream is required for log view.
;; Compression is always recommended when using local disk as log storage, as long as CPU or memory is not a bottleneck.
;; For object storage services like S3, which bill per request, it causes two extra GET requests for each log view,
;; but it saves storage space and network bandwidth, so compression is still recommended.
;LOG_COMPRESSION = none
;; Default artifact retention time in days. Artifacts can have their own retention periods by setting the `retention-days` option in the `actions/upload-artifact` step.
;ARTIFACT_RETENTION_DAYS = 90
;; Timeout to stop tasks that are in running status but haven't been updated for a long time
@ -2704,3 +2713,9 @@ LEVEL = Info
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; storage type
;STORAGE_TYPE = local
;[global_lock]
;; Lock service type, could be memory or redis
;SERVICE_TYPE = memory
;; Ignored for the "memory" type. For "redis" use something like `redis://127.0.0.1:6379/0`
;SERVICE_CONN_STR =
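The `redis` option lines up with the go-redsync dependency added to go.mod below. As a rough sketch only — the address and lock name are illustrative and this is not Gitea's actual wiring — acquiring a redsync-backed distributed lock looks roughly like this:

package main

import (
	"fmt"
	"log"

	"github.com/go-redsync/redsync/v4"
	"github.com/go-redsync/redsync/v4/redis/goredis/v9"
	goredislib "github.com/redis/go-redis/v9"
)

func main() {
	// Connection address as in the SERVICE_CONN_STR example above; adjust for your deployment.
	client := goredislib.NewClient(&goredislib.Options{Addr: "127.0.0.1:6379"})
	rs := redsync.New(goredis.NewPool(client))

	mutex := rs.NewMutex("example-global-lock") // illustrative lock name
	if err := mutex.Lock(); err != nil {
		log.Fatal(err)
	}
	defer mutex.Unlock() // Unlock returns (bool, error); both ignored here for brevity

	fmt.Println("critical section held across all instances sharing this Redis")
}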

go.mod

@ -9,20 +9,22 @@ require (
codeberg.org/gusted/mcaptcha v0.0.0-20220723083913-4f3072e1d570
connectrpc.com/connect v1.15.0
gitea.com/go-chi/binding v0.0.0-20240430071103-39a851e106ed
gitea.com/go-chi/cache v0.2.0
gitea.com/go-chi/cache v0.2.1
gitea.com/go-chi/captcha v0.0.0-20240315150714-fb487f629098
gitea.com/go-chi/session v0.0.0-20240316035857-16768d98ec96
gitea.com/lunny/dingtalk_webhook v0.0.0-20171025031554-e3534c89ef96
gitea.com/lunny/levelqueue v0.4.2-0.20230414023320-3c0159fe0fe4
github.com/42wim/httpsig v1.2.2
github.com/42wim/sshsig v0.0.0-20211121163825-841cf5bbc121
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
github.com/ProtonMail/go-crypto v1.0.0
github.com/PuerkitoBio/goquery v1.9.2
github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.7.2
github.com/alecthomas/chroma/v2 v2.14.0
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb
github.com/blevesearch/bleve/v2 v2.4.0
github.com/blevesearch/bleve/v2 v2.4.2
github.com/buildkite/terminal-to-html/v3 v3.12.1
github.com/caddyserver/certmagic v0.21.3
github.com/chi-middleware/proxy v1.1.1
@ -44,10 +46,10 @@ require (
github.com/go-chi/cors v1.2.1
github.com/go-co-op/gocron v1.37.0
github.com/go-enry/go-enry/v2 v2.8.8
github.com/go-fed/httpsig v1.1.1-0.20201223112313-55836744818e
github.com/go-git/go-billy/v5 v5.5.0
github.com/go-git/go-git/v5 v5.12.0
github.com/go-ldap/ldap/v3 v3.4.6
github.com/go-redsync/redsync/v4 v4.13.0
github.com/go-sql-driver/mysql v1.8.1
github.com/go-swagger/go-swagger v0.31.0
github.com/go-testfixtures/testfixtures/v3 v3.11.0
@ -108,12 +110,12 @@ require (
github.com/yuin/goldmark v1.7.2
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
github.com/yuin/goldmark-meta v1.1.0
golang.org/x/crypto v0.24.0
golang.org/x/crypto v0.26.0
golang.org/x/image v0.18.0
golang.org/x/net v0.26.0
golang.org/x/oauth2 v0.21.0
golang.org/x/sys v0.21.0
golang.org/x/text v0.16.0
golang.org/x/sys v0.23.0
golang.org/x/text v0.17.0
golang.org/x/tools v0.22.0
google.golang.org/grpc v1.62.1
google.golang.org/protobuf v1.34.2
@ -147,13 +149,13 @@ require (
github.com/aymerick/douceur v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/blevesearch/bleve_index_api v1.1.9 // indirect
github.com/blevesearch/bleve_index_api v1.1.10 // indirect
github.com/blevesearch/geo v0.1.20 // indirect
github.com/blevesearch/go-faiss v1.0.19 // indirect
github.com/blevesearch/go-faiss v1.0.20 // indirect
github.com/blevesearch/go-porterstemmer v1.0.3 // indirect
github.com/blevesearch/gtreap v0.1.1 // indirect
github.com/blevesearch/mmap-go v1.0.4 // indirect
github.com/blevesearch/scorch_segment_api/v2 v2.2.14 // indirect
github.com/blevesearch/scorch_segment_api/v2 v2.2.15 // indirect
github.com/blevesearch/segment v0.9.1 // indirect
github.com/blevesearch/snowballstem v0.9.0 // indirect
github.com/blevesearch/upsidedown_store_api v1.0.2 // indirect
@ -163,7 +165,7 @@ require (
github.com/blevesearch/zapx/v13 v13.3.10 // indirect
github.com/blevesearch/zapx/v14 v14.3.10 // indirect
github.com/blevesearch/zapx/v15 v15.3.13 // indirect
github.com/blevesearch/zapx/v16 v16.1.4 // indirect
github.com/blevesearch/zapx/v16 v16.1.5 // indirect
github.com/boombuler/barcode v1.0.1 // indirect
github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 // indirect
github.com/caddyserver/zerossl v0.1.3 // indirect
@ -188,6 +190,7 @@ require (
github.com/go-enry/go-oniguruma v1.2.1 // indirect
github.com/go-faster/city v1.0.1 // indirect
github.com/go-faster/errors v0.7.1 // indirect
github.com/go-fed/httpsig v1.1.1-0.20201223112313-55836744818e // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-openapi/analysis v0.23.0 // indirect
github.com/go-openapi/errors v0.22.0 // indirect
@ -209,6 +212,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/go-tpm v0.9.0 // indirect
github.com/gorilla/css v1.0.1 // indirect
@ -216,7 +220,9 @@ require (
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/securecookie v1.1.2 // indirect
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
@ -295,7 +301,7 @@ require (
go.uber.org/zap v1.27.0 // indirect
golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect

go.sum

@ -20,8 +20,8 @@ gitea.com/gitea/act v0.259.1 h1:8GG1o/xtUHl3qjn5f0h/2FXrT5ubBn05TJOM5ry+FBw=
gitea.com/gitea/act v0.259.1/go.mod h1:UxZWRYqQG2Yj4+4OqfGWW5a3HELwejyWFQyU7F1jUD8=
gitea.com/go-chi/binding v0.0.0-20240430071103-39a851e106ed h1:EZZBtilMLSZNWtHHcgq2mt6NSGhJSZBuduAlinMEmso=
gitea.com/go-chi/binding v0.0.0-20240430071103-39a851e106ed/go.mod h1:E3i3cgB04dDx0v3CytCgRTTn9Z/9x891aet3r456RVw=
gitea.com/go-chi/cache v0.2.0 h1:E0npuTfDW6CT1yD8NMDVc1SK6IeRjfmRL2zlEsCEd7w=
gitea.com/go-chi/cache v0.2.0/go.mod h1:iQlVK2aKTZ/rE9UcHyz9pQWGvdP9i1eI2spOpzgCrtE=
gitea.com/go-chi/cache v0.2.1 h1:bfAPkvXlbcZxPCpcmDVCWoHgiBSBmZN/QosnZvEC0+g=
gitea.com/go-chi/cache v0.2.1/go.mod h1:Qic0HZ8hOHW62ETGbonpwz8WYypj9NieU9659wFUJ8Q=
gitea.com/go-chi/captcha v0.0.0-20240315150714-fb487f629098 h1:p2ki+WK0cIeNQuqjR98IP2KZQKRzJJiV7aTeMAFwaWo=
gitea.com/go-chi/captcha v0.0.0-20240315150714-fb487f629098/go.mod h1:LjzIOHlRemuUyO7WR12fmm18VZIlCAaOt9L3yKw40pk=
gitea.com/go-chi/session v0.0.0-20240316035857-16768d98ec96 h1:IFDiMBObsP6CZIRaDLd54SR6zPYAffPXiXck5Xslu0Q=
@ -32,6 +32,8 @@ gitea.com/lunny/levelqueue v0.4.2-0.20230414023320-3c0159fe0fe4 h1:IFT+hup2xejHq
gitea.com/lunny/levelqueue v0.4.2-0.20230414023320-3c0159fe0fe4/go.mod h1:HBqmLbz56JWpfEGG0prskAV97ATNRoj5LDmPicD22hU=
gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s=
gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU=
github.com/42wim/httpsig v1.2.2 h1:ofAYoHUNs/MJOLqQ8hIxeyz2QxOz8qdSVvp3PX/oPgA=
github.com/42wim/httpsig v1.2.2/go.mod h1:P/UYo7ytNBFwc+dg35IubuAUIs8zj5zzFIgUCEl55WY=
github.com/42wim/sshsig v0.0.0-20211121163825-841cf5bbc121 h1:r3qt8PCHnfjOv9PN3H+XXKmDA1dfFMIN1AislhlA/ps=
github.com/42wim/sshsig v0.0.0-20211121163825-841cf5bbc121/go.mod h1:Ock8XgA7pvULhIaHGAk/cDnRfNrF9Jey81nPcc403iU=
github.com/6543/go-version v1.3.1 h1:HvOp+Telns7HWJ2Xo/05YXQSB2bE0WmVgbHqwMPZT4U=
@ -80,6 +82,8 @@ github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06
github.com/RoaringBitmap/roaring v0.7.1/go.mod h1:jdT9ykXwHFNdJbEtxePexlFYH9LXucApeS0/+/g+p1I=
github.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ=
github.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.7.2 h1:cSXom2MoKJ9KPPw29RoZtHvUETY4F4n/kXl8m9btnQ0=
github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.7.2/go.mod h1:JitQWJ8JuV4Y87l8VsHiiwhb3cgdyn68mX40s7NT6PA=
github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
github.com/alecthomas/chroma/v2 v2.2.0/go.mod h1:vf4zrexSH54oEjJ7EdB65tGNHmH3pGZmVkgTP5RHvAs=
@ -117,15 +121,15 @@ github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4=
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/blevesearch/bleve/v2 v2.0.5/go.mod h1:ZjWibgnbRX33c+vBRgla9QhPb4QOjD6fdVJ+R1Bk8LM=
github.com/blevesearch/bleve/v2 v2.4.0 h1:2xyg+Wv60CFHYccXc+moGxbL+8QKT/dZK09AewHgKsg=
github.com/blevesearch/bleve/v2 v2.4.0/go.mod h1:IhQHoFAbHgWKYavb9rQgQEJJVMuY99cKdQ0wPpst2aY=
github.com/blevesearch/bleve/v2 v2.4.2 h1:NooYP1mb3c0StkiY9/xviiq2LGSaE8BQBCc/pirMx0U=
github.com/blevesearch/bleve/v2 v2.4.2/go.mod h1:ATNKj7Yl2oJv/lGuF4kx39bST2dveX6w0th2FFYLkc8=
github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4=
github.com/blevesearch/bleve_index_api v1.1.9 h1:Cpq0Lp3As0Gfk3+PmcoNDRKeI50C5yuFNpj0YlN/bOE=
github.com/blevesearch/bleve_index_api v1.1.9/go.mod h1:PbcwjIcRmjhGbkS/lJCpfgVSMROV6TRubGGAODaK1W8=
github.com/blevesearch/bleve_index_api v1.1.10 h1:PDLFhVjrjQWr6jCuU7TwlmByQVCSEURADHdCqVS9+g0=
github.com/blevesearch/bleve_index_api v1.1.10/go.mod h1:PbcwjIcRmjhGbkS/lJCpfgVSMROV6TRubGGAODaK1W8=
github.com/blevesearch/geo v0.1.20 h1:paaSpu2Ewh/tn5DKn/FB5SzvH0EWupxHEIwbCk/QPqM=
github.com/blevesearch/geo v0.1.20/go.mod h1:DVG2QjwHNMFmjo+ZgzrIq2sfCh6rIHzy9d9d0B59I6w=
github.com/blevesearch/go-faiss v1.0.19 h1:UKoP8hS7DVsVSRRloNJb4qPfe2UQ99pP4D3oXd23g2A=
github.com/blevesearch/go-faiss v1.0.19/go.mod h1:jrxHrbl42X/RnDPI+wBoZU8joxxuRwedrxqswQ3xfU8=
github.com/blevesearch/go-faiss v1.0.20 h1:AIkdTQFWuZ5LQmKQSebgMR4RynGNw8ZseJXaan5kvtI=
github.com/blevesearch/go-faiss v1.0.20/go.mod h1:jrxHrbl42X/RnDPI+wBoZU8joxxuRwedrxqswQ3xfU8=
github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo=
github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M=
github.com/blevesearch/gtreap v0.1.1 h1:2JWigFrzDMR+42WGIN/V2p0cUvn4UP3C4Q5nmaZGW8Y=
@ -134,8 +138,8 @@ github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+
github.com/blevesearch/mmap-go v1.0.4 h1:OVhDhT5B/M1HNPpYPBKIEJaD0F3Si+CrEKULGCDPWmc=
github.com/blevesearch/mmap-go v1.0.4/go.mod h1:EWmEAOmdAS9z/pi/+Toxu99DnsbhG1TIxUoRmJw/pSs=
github.com/blevesearch/scorch_segment_api/v2 v2.0.1/go.mod h1:lq7yK2jQy1yQjtjTfU931aVqz7pYxEudHaDwOt1tXfU=
github.com/blevesearch/scorch_segment_api/v2 v2.2.14 h1:fgMLMpGWR7u2TdRm7XSZVWhPvMAcdYHh25Lq1fQ6Fjo=
github.com/blevesearch/scorch_segment_api/v2 v2.2.14/go.mod h1:B7+a7vfpY4NsjuTkpv/eY7RZ91Xr90VaJzT2t7upZN8=
github.com/blevesearch/scorch_segment_api/v2 v2.2.15 h1:prV17iU/o+A8FiZi9MXmqbagd8I0bCqM7OKUYPbnb5Y=
github.com/blevesearch/scorch_segment_api/v2 v2.2.15/go.mod h1:db0cmP03bPNadXrCDuVkKLV6ywFSiRgPFT1YVrestBc=
github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ=
github.com/blevesearch/segment v0.9.1 h1:+dThDy+Lvgj5JMxhmOVlgFfkUtZV2kw49xax4+jTfSU=
github.com/blevesearch/segment v0.9.1/go.mod h1:zN21iLm7+GnBHWTao9I+Au/7MBiL8pPFtJBJTsk6kQw=
@ -163,12 +167,11 @@ github.com/blevesearch/zapx/v14 v14.3.10/go.mod h1:qqyuR0u230jN1yMmE4FIAuCxmahRQ
github.com/blevesearch/zapx/v15 v15.2.0/go.mod h1:MmQceLpWfME4n1WrBFIwplhWmaQbQqLQARpaKUEOs/A=
github.com/blevesearch/zapx/v15 v15.3.13 h1:6EkfaZiPlAxqXz0neniq35my6S48QI94W/wyhnpDHHQ=
github.com/blevesearch/zapx/v15 v15.3.13/go.mod h1:Turk/TNRKj9es7ZpKK95PS7f6D44Y7fAFy8F4LXQtGg=
github.com/blevesearch/zapx/v16 v16.1.4 h1:TBQfG77g2UUXwfjOVcEtB9pXkg6JBmGXkeZKI67+TiA=
github.com/blevesearch/zapx/v16 v16.1.4/go.mod h1:+Q+Z89Iv7ewhdX2jyE6Qs/RUnN4tZuokaQ0xvTaFmx8=
github.com/blevesearch/zapx/v16 v16.1.5 h1:b0sMcarqNFxuXvjoXsF8WtwVahnxyhEvBSRJi/AUHjU=
github.com/blevesearch/zapx/v16 v16.1.5/go.mod h1:J4mSF39w1QELc11EWRSBFkPeZuO7r/NPKkHzDCoiaI8=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs=
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous=
github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
@ -212,7 +215,6 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -245,7 +247,6 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m
github.com/dvyukov/go-fuzz v0.0.0-20210429054444-fca39067bc72/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/editorconfig/editorconfig-core-go/v2 v2.6.2 h1:dKG8sc7n321deIVRcQtwlMNoBEra7j0qQ8RwxO8RN0w=
github.com/editorconfig/editorconfig-core-go/v2 v2.6.2/go.mod h1:7dvD3GCm7eBw53xZ/lsiq72LqobdMg3ITbMBxnmJmqY=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
@ -339,8 +340,14 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI=
github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-redsync/redsync/v4 v4.13.0 h1:49X6GJfnbLGaIpBBREM/zA4uIMDXKAh1NDkvQ1EkZKA=
github.com/go-redsync/redsync/v4 v4.13.0/go.mod h1:HMW4Q224GZQz6x1Xc7040Yfgacukdzu7ifTDAKiyErQ=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-swagger/go-swagger v0.31.0 h1:H8eOYQnY2u7vNKWDNykv2xJP3pBhRG/R+SOCAmKrLlc=
@ -379,7 +386,6 @@ github.com/golang/geo v0.0.0-20230421003525-6adc56603217/go.mod h1:8wI0hitZ3a1Ix
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@ -395,6 +401,10 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws=
github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@ -419,7 +429,6 @@ github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw=
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@ -445,10 +454,15 @@ github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE=
github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
@ -535,13 +549,10 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s=
github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de/go.mod h1:3q8WtuPQsoRbatJuy3nvq/hRSvuBJrHHr+ybPPiNvHQ=
github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af/go.mod h1:Cqz6pqow14VObJ7peltM+2n3PWOz7yTrfUuGbVFkzN0=
github.com/lunny/vfsgen v0.0.0-20220105142115-2c99e1ffdfa0 h1:F/3FfGmKdiKFa8kL3YrpZ7pe9H4l4AzA1pbaOUnRvPI=
github.com/lunny/vfsgen v0.0.0-20220105142115-2c99e1ffdfa0/go.mod h1:JEfTc3+2DF9Z4PXhLLvXL42zexJyh8rIq3OzUj/0rAk=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@ -561,7 +572,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/meilisearch/meilisearch-go v0.26.3 h1:EYt+C1n7IvjKzgXM3xqce5iJzNBq33F7ayZOKx96TKY=
@ -619,12 +629,10 @@ github.com/olivere/elastic/v7 v7.0.32 h1:R7CXvbu8Eq+WlsLgxmKVKPox0oOwAE/2T9Si5Bn
github.com/olivere/elastic/v7 v7.0.32/go.mod h1:c7PVmLe3Fxq77PIfY/bZmxY/TAamBhCzZ8xDOE09a9k=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
@ -638,7 +646,6 @@ github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU=
github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI=
github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
@ -670,6 +677,8 @@ github.com/quasoft/websspi v1.1.2/go.mod h1:HmVdl939dQ0WIXZhyik+ARdI03M6bQzaSEKc
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/v9 v9.6.0 h1:NLck+Rab3AOTHw21CGRpvQpgTrAU4sgdCswqGtlhGRA=
github.com/redis/go-redis/v9 v9.6.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo=
github.com/redis/rueidis v1.0.19/go.mod h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rhysd/actionlint v1.7.1 h1:WJaDzyT1StBWVKGSsZPYnbV0HF9Y9/vD6KFdZQL42qE=
@ -708,16 +717,11 @@ github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d/go.mod h1:vq0tzqLRu6TS7Id0wMo2N5QzJoKedVeovOpHjnykSzY=
github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92/go.mod h1:mF1DpOSOUiJRMR+FDqaqu3EBqrybQtrDDszLUZ6oxPg=
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.1.1 h1:T/YLemO5Yp7KPzS+lVtu+WsHn8yoSwTfItdAd1r3cck=
github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
@ -761,6 +765,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM=
github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
@ -776,7 +782,6 @@ github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oW
github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
github.com/unknwon/com v1.0.1 h1:3d1LTxD+Lnf3soQiD4Cp/0BRB+Rsa/+RTvz8GMMzIXs=
github.com/unknwon/com v1.0.1/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
@ -852,7 +857,6 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
@ -864,8 +868,8 @@ golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2Uz
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f h1:3CW0unweImhOzd5FmYuRsD4Y4oQFKZIjAnKbjV4WIrw=
golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ=
@ -879,11 +883,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@ -910,15 +911,13 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -949,8 +948,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
@ -960,10 +959,9 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@ -973,13 +971,11 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
@ -995,7 +991,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
@ -1021,7 +1016,6 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE=
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw=
gopkg.in/ini.v1 v1.44.2/go.mod h1:M3Cogqpuv0QCi3ExAY5V4uOt4qb/R3xZubo9m8lK5wg=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=


@ -361,6 +361,19 @@ func GetRunByIndex(ctx context.Context, repoID, index int64) (*ActionRun, error)
return run, nil
}
func GetLatestRun(ctx context.Context, repoID int64) (*ActionRun, error) {
run := &ActionRun{
RepoID: repoID,
}
has, err := db.GetEngine(ctx).Where("repo_id=?", repoID).Desc("index").Get(run)
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("latest run with repo_id %d: %w", repoID, util.ErrNotExist)
}
return run, nil
}
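Since the not-found case above is wrapped with %w around util.ErrNotExist, callers can tell "no runs yet" apart from real errors with errors.Is. A tiny self-contained illustration of that pattern (the sentinel is a stand-in, not the real util package):

package main

import (
	"errors"
	"fmt"
)

var errNotExist = errors.New("does not exist") // stand-in for util.ErrNotExist

func latestRun(repoID int64) (int64, error) {
	// Mirrors the error wrapping used in GetLatestRun above.
	return 0, fmt.Errorf("latest run with repo_id %d: %w", repoID, errNotExist)
}

func main() {
	_, err := latestRun(42)
	fmt.Println(errors.Is(err, errNotExist)) // true: callers can treat this as "no runs yet"
}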
func GetWorkflowLatestRun(ctx context.Context, repoID int64, workflowFile, branch, event string) (*ActionRun, error) {
var run ActionRun
q := db.GetEngine(ctx).Where("repo_id=?", repoID).


@ -23,14 +23,25 @@ import (
)
// ActionRunner represents runner machines
//
// It can be:
// 1. global runner, OwnerID is 0 and RepoID is 0
// 2. org/user level runner, OwnerID is org/user ID and RepoID is 0
// 3. repo level runner, OwnerID is 0 and RepoID is repo ID
//
// Please note that it's not acceptable for both OwnerID and RepoID to be non-zero,
// otherwise it becomes complicated to find runners belonging to a specific owner.
// For example, conditions like `OwnerID = 1` will also return runner {OwnerID: 1, RepoID: 1},
// but it's a repo level runner, not an org/user level runner.
// To avoid this, make it clear with {OwnerID: 0, RepoID: 1} for repo level runners.
type ActionRunner struct {
ID int64
UUID string `xorm:"CHAR(36) UNIQUE"`
Name string `xorm:"VARCHAR(255)"`
Version string `xorm:"VARCHAR(64)"`
OwnerID int64 `xorm:"index"` // org level runner, 0 means system
OwnerID int64 `xorm:"index"`
Owner *user_model.User `xorm:"-"`
RepoID int64 `xorm:"index"` // repo level runner, if OwnerID also is zero, then it's a global
RepoID int64 `xorm:"index"`
Repo *repo_model.Repository `xorm:"-"`
Description string `xorm:"TEXT"`
Base int // 0 native 1 docker 2 virtual machine
@ -157,7 +168,7 @@ func init() {
type FindRunnerOptions struct {
db.ListOptions
RepoID int64
OwnerID int64
OwnerID int64 // it will be ignored if RepoID is set
Sort string
Filter string
IsOnline optional.Option[bool]
@ -174,8 +185,7 @@ func (opts FindRunnerOptions) ToConds() builder.Cond {
c = c.Or(builder.Eq{"repo_id": 0, "owner_id": 0})
}
cond = cond.And(c)
}
if opts.OwnerID > 0 {
} else if opts.OwnerID > 0 { // OwnerID is ignored if RepoID is set
c := builder.NewCond().And(builder.Eq{"owner_id": opts.OwnerID})
if opts.WithAvailable {
c = c.Or(builder.Eq{"repo_id": 0, "owner_id": 0})
@ -263,6 +273,11 @@ func DeleteRunner(ctx context.Context, id int64) error {
// CreateRunner creates new runner.
func CreateRunner(ctx context.Context, t *ActionRunner) error {
if t.OwnerID != 0 && t.RepoID != 0 {
// It's trying to create a runner that belongs to a repository, but OwnerID has been set accidentally.
// Remove OwnerID to avoid confusion; it's not worth returning an error here.
t.OwnerID = 0
}
return db.Insert(ctx, t)
}
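To make the OwnerID/RepoID combinations documented above concrete, here is a self-contained toy that restates the three runner scopes (a stand-in struct, not the real model):

package main

import "fmt"

// runnerScope mirrors only the two ActionRunner fields that decide scope.
type runnerScope struct {
	OwnerID int64
	RepoID  int64
}

func describe(r runnerScope) string {
	switch {
	case r.OwnerID == 0 && r.RepoID == 0:
		return "global runner"
	case r.RepoID == 0:
		return "org/user level runner"
	default:
		return "repo level runner" // OwnerID is forced to 0 in this case
	}
}

func main() {
	for _, r := range []runnerScope{{0, 0}, {5, 0}, {0, 7}} {
		fmt.Printf("%+v => %s\n", r, describe(r))
	}
}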


@ -15,12 +15,23 @@ import (
)
// ActionRunnerToken represents runner tokens
//
// It can be:
// 1. global token, OwnerID is 0 and RepoID is 0
// 2. org/user level token, OwnerID is org/user ID and RepoID is 0
// 3. repo level token, OwnerID is 0 and RepoID is repo ID
//
// Please note that it's not acceptable for both OwnerID and RepoID to be non-zero,
// otherwise it becomes complicated to find tokens belonging to a specific owner.
// For example, conditions like `OwnerID = 1` will also return token {OwnerID: 1, RepoID: 1},
// but it's a repo level token, not an org/user level token.
// To avoid this, make it clear with {OwnerID: 0, RepoID: 1} for repo level tokens.
type ActionRunnerToken struct {
ID int64
Token string `xorm:"UNIQUE"`
OwnerID int64 `xorm:"index"` // org level runner, 0 means system
OwnerID int64 `xorm:"index"`
Owner *user_model.User `xorm:"-"`
RepoID int64 `xorm:"index"` // repo level runner, if orgid also is zero, then it's a global
RepoID int64 `xorm:"index"`
Repo *repo_model.Repository `xorm:"-"`
IsActive bool // true means it can be used
@ -58,7 +69,14 @@ func UpdateRunnerToken(ctx context.Context, r *ActionRunnerToken, cols ...string
}
// NewRunnerToken creates a new active runner token and invalidates all old tokens
// ownerID will be ignored and treated as 0 if repoID is non-zero.
func NewRunnerToken(ctx context.Context, ownerID, repoID int64) (*ActionRunnerToken, error) {
if ownerID != 0 && repoID != 0 {
// It's trying to create a runner token that belongs to a repository, but OwnerID has been set accidentally.
// Remove OwnerID to avoid confusion; it's not worth returning an error here.
ownerID = 0
}
token, err := util.CryptoRandomString(40)
if err != nil {
return nil, err
@ -84,6 +102,12 @@ func NewRunnerToken(ctx context.Context, ownerID, repoID int64) (*ActionRunnerTo
// GetLatestRunnerToken returns the latest runner token
func GetLatestRunnerToken(ctx context.Context, ownerID, repoID int64) (*ActionRunnerToken, error) {
if ownerID != 0 && repoID != 0 {
// It's trying to get a runner token that belongs to a repository, but OwnerID has been set accidentally.
// Remove OwnerID to avoid confusion; it's not worth returning an error here.
ownerID = 0
}
var runnerToken ActionRunnerToken
has, err := db.GetEngine(ctx).Where("owner_id=? AND repo_id=?", ownerID, repoID).
OrderBy("id DESC").Get(&runnerToken)


@ -13,8 +13,6 @@ import (
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/timeutil"
webhook_module "code.gitea.io/gitea/modules/webhook"
"github.com/robfig/cron/v3"
)
// ActionSchedule represents a schedule of a workflow file
@ -53,8 +51,6 @@ func GetReposMapByIDs(ctx context.Context, ids []int64) (map[int64]*repo_model.R
return repos, db.GetEngine(ctx).In("id", ids).Find(&repos)
}
var cronParser = cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)
// CreateScheduleTask creates new schedule task.
func CreateScheduleTask(ctx context.Context, rows []*ActionSchedule) error {
// Return early if there are no rows to insert
@ -80,19 +76,21 @@ func CreateScheduleTask(ctx context.Context, rows []*ActionSchedule) error {
now := time.Now()
for _, spec := range row.Specs {
specRow := &ActionScheduleSpec{
RepoID: row.RepoID,
ScheduleID: row.ID,
Spec: spec,
}
// Parse the spec and check for errors
schedule, err := cronParser.Parse(spec)
schedule, err := specRow.Parse()
if err != nil {
continue // skip to the next spec if there's an error
}
specRow.Next = timeutil.TimeStamp(schedule.Next(now).Unix())
// Insert the new schedule spec row
if err = db.Insert(ctx, &ActionScheduleSpec{
RepoID: row.RepoID,
ScheduleID: row.ID,
Spec: spec,
Next: timeutil.TimeStamp(schedule.Next(now).Unix()),
}); err != nil {
if err = db.Insert(ctx, specRow); err != nil {
return err
}
}


@ -5,6 +5,8 @@ package actions
import (
"context"
"strings"
"time"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
@ -32,8 +34,29 @@ type ActionScheduleSpec struct {
Updated timeutil.TimeStamp `xorm:"updated"`
}
// Parse parses the spec and returns a cron.Schedule
// Unlike the default cron parser, Parse uses UTC timezone as the default if none is specified.
func (s *ActionScheduleSpec) Parse() (cron.Schedule, error) {
return cronParser.Parse(s.Spec)
parser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)
schedule, err := parser.Parse(s.Spec)
if err != nil {
return nil, err
}
// If the spec has specified a timezone, use it
if strings.HasPrefix(s.Spec, "TZ=") || strings.HasPrefix(s.Spec, "CRON_TZ=") {
return schedule, nil
}
specSchedule, ok := schedule.(*cron.SpecSchedule)
// If it's not a spec schedule, like "@every 5m", timezone is not relevant
if !ok {
return schedule, nil
}
// Set the timezone to UTC
specSchedule.Location = time.UTC
return specSchedule, nil
}
func init() {


@ -0,0 +1,71 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestActionScheduleSpec_Parse(t *testing.T) {
// Mock a non-UTC local timezone
local := time.Local
tz, err := time.LoadLocation("Asia/Shanghai")
require.NoError(t, err)
defer func() {
time.Local = local
}()
time.Local = tz
now, err := time.Parse(time.RFC3339, "2024-07-31T15:47:55+08:00")
require.NoError(t, err)
tests := []struct {
name string
spec string
want string
wantErr assert.ErrorAssertionFunc
}{
{
name: "regular",
spec: "0 10 * * *",
want: "2024-07-31T10:00:00Z",
wantErr: assert.NoError,
},
{
name: "invalid",
spec: "0 10 * *",
want: "",
wantErr: assert.Error,
},
{
name: "with timezone",
spec: "TZ=America/New_York 0 10 * * *",
want: "2024-07-31T14:00:00Z",
wantErr: assert.NoError,
},
{
name: "timezone irrelevant",
spec: "@every 5m",
want: "2024-07-31T07:52:55Z",
wantErr: assert.NoError,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &ActionScheduleSpec{
Spec: tt.spec,
}
got, err := s.Parse()
tt.wantErr(t, err)
if err == nil {
assert.Equal(t, tt.want, got.Next(now).UTC().Format(time.RFC3339))
}
})
}
}


@ -35,7 +35,7 @@ type ActionTask struct {
RunnerID int64 `xorm:"index"`
Status Status `xorm:"index"`
Started timeutil.TimeStamp `xorm:"index"`
Stopped timeutil.TimeStamp
Stopped timeutil.TimeStamp `xorm:"index(stopped_log_expired)"`
RepoID int64 `xorm:"index"`
OwnerID int64 `xorm:"index"`
@ -52,7 +52,7 @@ type ActionTask struct {
LogLength int64 // lines count
LogSize int64 // blob size
LogIndexes LogIndexes `xorm:"LONGBLOB"` // line number to offset
LogExpired bool // files that are too old will be deleted
LogExpired bool `xorm:"index(stopped_log_expired)"` // files that are too old will be deleted
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated index"`
@ -470,6 +470,16 @@ func StopTask(ctx context.Context, taskID int64, status Status) error {
return nil
}
func FindOldTasksToExpire(ctx context.Context, olderThan timeutil.TimeStamp, limit int) ([]*ActionTask, error) {
e := db.GetEngine(ctx)
tasks := make([]*ActionTask, 0, limit)
// Check "stopped > 0" to avoid deleting tasks that are still running
return tasks, e.Where("stopped > 0 AND stopped < ? AND log_expired = ?", olderThan, false).
Limit(limit).
Find(&tasks)
}
func isSubset(set, subset []string) bool {
m := make(container.Set[string], len(set))
for _, v := range set {
@ -492,7 +502,13 @@ func convertTimestamp(timestamp *timestamppb.Timestamp) timeutil.TimeStamp {
}
func logFileName(repoFullName string, taskID int64) string {
return fmt.Sprintf("%s/%02x/%d.log", repoFullName, taskID%256, taskID)
ret := fmt.Sprintf("%s/%02x/%d.log", repoFullName, taskID%256, taskID)
if setting.Actions.LogCompression.IsZstd() {
ret += ".zst"
}
return ret
}
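As a quick worked example of the path scheme in logFileName (the repository name and task ID are made up): the bucket directory is the task ID modulo 256, printed as two hex digits.

package main

import "fmt"

func main() {
	repo, taskID := "gitea/gitea", int64(1234) // hypothetical values
	// 1234 % 256 = 210 = 0xd2, so this task's log lands in bucket "d2"
	name := fmt.Sprintf("%s/%02x/%d.log", repo, taskID%256, taskID)
	fmt.Println(name)          // gitea/gitea/d2/1234.log
	fmt.Println(name + ".zst") // with zstd log compression enabled
}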
func getTaskIDFromCache(token string) int64 {

View File

@ -5,7 +5,6 @@ package actions
import (
"context"
"errors"
"strings"
"code.gitea.io/gitea/models/db"
@ -15,6 +14,18 @@ import (
"xorm.io/builder"
)
// ActionVariable represents a variable that can be used in actions
//
// It can be:
// 1. global variable, OwnerID is 0 and RepoID is 0
// 2. org/user level variable, OwnerID is org/user ID and RepoID is 0
// 3. repo level variable, OwnerID is 0 and RepoID is repo ID
//
// Please note that it's not acceptable for both OwnerID and RepoID to be non-zero,
// otherwise it becomes complicated to find variables belonging to a specific owner.
// For example, conditions like `OwnerID = 1` will also return variable {OwnerID: 1, RepoID: 1},
// but it's a repo level variable, not an org/user level variable.
// To avoid this, make it clear with {OwnerID: 0, RepoID: 1} for repo level variables.
type ActionVariable struct {
ID int64 `xorm:"pk autoincr"`
OwnerID int64 `xorm:"UNIQUE(owner_repo_name)"`
@ -29,30 +40,26 @@ func init() {
db.RegisterModel(new(ActionVariable))
}
func (v *ActionVariable) Validate() error {
if v.OwnerID != 0 && v.RepoID != 0 {
return errors.New("a variable should not be bound to an owner and a repository at the same time")
}
return nil
func InsertVariable(ctx context.Context, ownerID, repoID int64, name, data string) (*ActionVariable, error) {
if ownerID != 0 && repoID != 0 {
// It's trying to create a variable that belongs to a repository, but OwnerID has been set accidentally.
// Remove OwnerID to avoid confusion; it's not worth returning an error here.
ownerID = 0
}
func InsertVariable(ctx context.Context, ownerID, repoID int64, name, data string) (*ActionVariable, error) {
variable := &ActionVariable{
OwnerID: ownerID,
RepoID: repoID,
Name: strings.ToUpper(name),
Data: data,
}
if err := variable.Validate(); err != nil {
return variable, err
}
return variable, db.Insert(ctx, variable)
}
type FindVariablesOpts struct {
db.ListOptions
OwnerID int64
RepoID int64
OwnerID int64 // it will be ignored if RepoID is set
Name string
}
@ -60,8 +67,13 @@ func (opts FindVariablesOpts) ToConds() builder.Cond {
cond := builder.NewCond()
// Since we now support instance-level variables,
// there is no need to check for null values for `owner_id` and `repo_id`
cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
if opts.RepoID != 0 { // if RepoID is set
// ignore OwnerID and treat it as 0
cond = cond.And(builder.Eq{"owner_id": 0})
} else {
cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
}
if opts.Name != "" {
cond = cond.And(builder.Eq{"name": strings.ToUpper(opts.Name)})

View File

@ -450,6 +450,10 @@ func GetFeeds(ctx context.Context, opts GetFeedsOptions) (ActionList, int64, err
return nil, 0, err
}
actions := make([]*Action, 0, opts.PageSize)
var count int64
if opts.Page < 10 { // TODO: why 10 and not another value? It's an empirical choice.
sess := db.GetEngine(ctx).Where(cond).
Select("`action`.*"). // this line will avoid select other joined table's columns
Join("INNER", "repository", "`repository`.id = `action`.repo_id")
@ -457,11 +461,36 @@ func GetFeeds(ctx context.Context, opts GetFeedsOptions) (ActionList, int64, err
opts.SetDefaultValues()
sess = db.SetSessionPagination(sess, &opts)
actions := make([]*Action, 0, opts.PageSize)
count, err := sess.Desc("`action`.created_unix").FindAndCount(&actions)
count, err = sess.Desc("`action`.created_unix").FindAndCount(&actions)
if err != nil {
return nil, 0, fmt.Errorf("FindAndCount: %w", err)
}
} else {
// First, only query which IDs are necessary, and only then query all actions to speed up the overall query
sess := db.GetEngine(ctx).Where(cond).
Select("`action`.id").
Join("INNER", "repository", "`repository`.id = `action`.repo_id")
opts.SetDefaultValues()
sess = db.SetSessionPagination(sess, &opts)
actionIDs := make([]int64, 0, opts.PageSize)
if err := sess.Table("action").Desc("`action`.created_unix").Find(&actionIDs); err != nil {
return nil, 0, fmt.Errorf("Find(actionsIDs): %w", err)
}
count, err = db.GetEngine(ctx).Where(cond).
Table("action").
Cols("`action`.id").
Join("INNER", "repository", "`repository`.id = `action`.repo_id").Count()
if err != nil {
return nil, 0, fmt.Errorf("Count: %w", err)
}
if err := db.GetEngine(ctx).In("`action`.id", actionIDs).Desc("`action`.created_unix").Find(&actions); err != nil {
return nil, 0, fmt.Errorf("Find: %w", err)
}
}
if err := ActionList(actions).LoadAttributes(ctx); err != nil {
return nil, 0, fmt.Errorf("LoadAttributes: %w", err)

View File

@ -229,35 +229,26 @@ func UpdatePublicKeyUpdated(ctx context.Context, id int64) error {
// PublicKeysAreExternallyManaged returns whether the provided KeyID represents an externally managed Key
func PublicKeysAreExternallyManaged(ctx context.Context, keys []*PublicKey) ([]bool, error) {
sources := make([]*auth.Source, 0, 5)
sourceCache := make(map[int64]*auth.Source, len(keys))
externals := make([]bool, len(keys))
keyloop:
for i, key := range keys {
if key.LoginSourceID == 0 {
externals[i] = false
continue keyloop
continue
}
var source *auth.Source
sourceloop:
for _, s := range sources {
if s.ID == key.LoginSourceID {
source = s
break sourceloop
}
}
if source == nil {
source, ok := sourceCache[key.LoginSourceID]
if !ok {
var err error
source, err = auth.GetSourceByID(ctx, key.LoginSourceID)
if err != nil {
if auth.IsErrSourceNotExist(err) {
externals[i] = false
sources[i] = &auth.Source{
sourceCache[key.LoginSourceID] = &auth.Source{
ID: key.LoginSourceID,
}
continue keyloop
continue
}
return nil, err
}

View File

@ -12,6 +12,8 @@ import (
"strings"
"testing"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/setting"
"github.com/42wim/sshsig"
@ -26,7 +28,6 @@ func Test_SSHParsePublicKey(t *testing.T) {
length int
content string
}{
{"dsa-1024", false, "dsa", 1024, "ssh-dss AAAAB3NzaC1kc3MAAACBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn59NriyboW2x/DZuYAz3ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczWOVsaszBZSl90q8UnWlSH6P+/YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQsecdKktISwTakzAAAAFQCzKsO2JhNKlL+wwwLGOcLffoAmkwAAAIBpK7/3xvduajLBD/9vASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g+eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb0N6s08NZysLzvj0N+ZC/FnhKTLzIyMtkHf/IrPCwlM+pV/M/96YgAAAIEAqQcGn9CKgzgPaguIZooTAOQdvBLMI5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxcNs4BeVKhy2PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd642982daopE7zQ/NPAnJfag= nocomment"},
{"rsa-1024", false, "rsa", 1024, "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDAu7tvIvX6ZHrRXuZNfkR3XLHSsuCK9Zn3X58lxBcQzuo5xZgB6vRwwm/QtJuF+zZPtY5hsQILBLmF+BZ5WpKZp1jBeSjH2G7lxet9kbcH+kIVj0tPFEoyKI9wvWqIwC4prx/WVk2wLTJjzBAhyNxfEq7C9CeiX9pQEbEqJfkKCQ== nocomment\n"},
{"rsa-2048", false, "rsa", 2048, "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMZXh+1OBUwSH9D45wTaxErQIN9IoC9xl7MKJkqvTvv6O5RR9YW/IK9FbfjXgXsppYGhsCZo1hFOOsXHMnfOORqu/xMDx4yPuyvKpw4LePEcg4TDipaDFuxbWOqc/BUZRZcXu41QAWfDLrInwsltWZHSeG7hjhpacl4FrVv9V1pS6Oc5Q1NxxEzTzuNLS/8diZrTm/YAQQ/+B+mzWI3zEtF4miZjjAljWd1LTBPvU23d29DcBmmFahcZ441XZsTeAwGxG/Q6j8NgNXj9WxMeWwxXV2jeAX/EBSpZrCVlCQ1yJswT6xCp8TuBnTiGWYMBNTbOZvPC4e0WI2/yZW/s5F nocomment"},
{"ecdsa-256", false, "ecdsa", 256, "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFQacN3PrOll7PXmN5B/ZNVahiUIqI05nbBlZk1KXsO3d06ktAWqbNflv2vEmA38bTFTfJ2sbn2B5ksT52cDDbA= nocomment"},
@ -170,7 +171,6 @@ func Test_calcFingerprint(t *testing.T) {
fp string
content string
}{
{"dsa-1024", false, "SHA256:fSIHQlpKMDsGPVAXI8BPYfRp+e2sfvSt1sMrPsFiXrc", "ssh-dss AAAAB3NzaC1kc3MAAACBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn59NriyboW2x/DZuYAz3ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczWOVsaszBZSl90q8UnWlSH6P+/YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQsecdKktISwTakzAAAAFQCzKsO2JhNKlL+wwwLGOcLffoAmkwAAAIBpK7/3xvduajLBD/9vASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g+eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb0N6s08NZysLzvj0N+ZC/FnhKTLzIyMtkHf/IrPCwlM+pV/M/96YgAAAIEAqQcGn9CKgzgPaguIZooTAOQdvBLMI5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxcNs4BeVKhy2PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd642982daopE7zQ/NPAnJfag= nocomment"},
{"rsa-1024", false, "SHA256:vSnDkvRh/xM6kMxPidLgrUhq3mCN7CDaronCEm2joyQ", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDAu7tvIvX6ZHrRXuZNfkR3XLHSsuCK9Zn3X58lxBcQzuo5xZgB6vRwwm/QtJuF+zZPtY5hsQILBLmF+BZ5WpKZp1jBeSjH2G7lxet9kbcH+kIVj0tPFEoyKI9wvWqIwC4prx/WVk2wLTJjzBAhyNxfEq7C9CeiX9pQEbEqJfkKCQ== nocomment\n"},
{"rsa-2048", false, "SHA256:ZHD//a1b9VuTq9XSunAeYjKeU1xDa2tBFZYrFr2Okkg", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMZXh+1OBUwSH9D45wTaxErQIN9IoC9xl7MKJkqvTvv6O5RR9YW/IK9FbfjXgXsppYGhsCZo1hFOOsXHMnfOORqu/xMDx4yPuyvKpw4LePEcg4TDipaDFuxbWOqc/BUZRZcXu41QAWfDLrInwsltWZHSeG7hjhpacl4FrVv9V1pS6Oc5Q1NxxEzTzuNLS/8diZrTm/YAQQ/+B+mzWI3zEtF4miZjjAljWd1LTBPvU23d29DcBmmFahcZ441XZsTeAwGxG/Q6j8NgNXj9WxMeWwxXV2jeAX/EBSpZrCVlCQ1yJswT6xCp8TuBnTiGWYMBNTbOZvPC4e0WI2/yZW/s5F nocomment"},
{"ecdsa-256", false, "SHA256:Bqx/xgWqRKLtkZ0Lr4iZpgb+5lYsFpSwXwVZbPwuTRw", "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFQacN3PrOll7PXmN5B/ZNVahiUIqI05nbBlZk1KXsO3d06ktAWqbNflv2vEmA38bTFTfJ2sbn2B5ksT52cDDbA= nocomment"},
@ -503,3 +503,11 @@ func runErr(t *testing.T, stdin []byte, args ...string) {
t.Fatal("expected error")
}
}
func Test_PublicKeysAreExternallyManaged(t *testing.T) {
key1 := unittest.AssertExistsAndLoadBean(t, &PublicKey{ID: 1})
externals, err := PublicKeysAreExternallyManaged(db.DefaultContext, []*PublicKey{key1})
assert.NoError(t, err)
assert.Len(t, externals, 1)
assert.False(t, externals[0])
}

View File

@ -309,6 +309,22 @@ func (s AccessTokenScope) HasScope(scopes ...AccessTokenScope) (bool, error) {
return true, nil
}
// HasAnyScope returns true if any of the scopes is contained in the string
func (s AccessTokenScope) HasAnyScope(scopes ...AccessTokenScope) (bool, error) {
bitmap, err := s.parse()
if err != nil {
return false, err
}
for _, s := range scopes {
if has, err := bitmap.hasScope(s); has || err != nil {
return has, err
}
}
return false, nil
}
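A short illustration of the difference between HasScope and HasAnyScope; the scope literals and the expected results are assumptions based on the current scope scheme, and fmt is assumed to be imported:

func ExampleAccessTokenScope_HasAnyScope() {
	// Illustrative only: a token limited to repository reads and issue writes.
	s := AccessTokenScope("read:repository,write:issue")

	allOK, _ := s.HasScope("write:repository", "write:issue")    // every listed scope must be covered
	anyOK, _ := s.HasAnyScope("write:repository", "write:issue") // one matching scope is enough
	fmt.Println(allOK, anyOK) // expected: false true
}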
// hasScope returns true if the string has the given scope
func (bitmap accessTokenScopeBitmap) hasScope(scope AccessTokenScope) (bool, error) {
expectedBits, ok := allAccessTokenScopeBits[scope]

View File

@ -26,7 +26,7 @@
fork_id: 0
is_template: false
template_id: 0
size: 7320
size: 7597
is_fsck_enabled: true
close_issues_via_commit_in_any_branch: false

View File

@ -392,6 +392,13 @@ func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to str
return err
}
// 4.1 Update the head branch name of all unmerged pull requests
if _, err = sess.Table("pull_request").Where("head_repo_id=? AND head_branch=? AND has_merged=?",
repo.ID, from, false).
Update(map[string]any{"head_branch": to}); err != nil {
return err
}
// 5. insert renamed branch record
renamedBranch := &RenamedBranch{
RepoID: repo.ID,
@ -427,7 +434,8 @@ type RecentlyPushedNewBranch struct {
// FindRecentlyPushedNewBranches returns at most 2 new branches pushed by the user in the last 2 hours that have no opened PRs
// if opts.CommitAfterUnix is 0, we will find the branches that were committed to in the last 2 hours
// if opts.ListOptions is not set, we will only display top 2 latest branch
// if opts.ListOptions is not set, we will only display top 2 latest branches.
// Protected branches will be skipped since they are unlikely to be used to create new PRs.
func FindRecentlyPushedNewBranches(ctx context.Context, doer *user_model.User, opts *FindRecentlyPushedNewBranchesOptions) ([]*RecentlyPushedNewBranch, error) {
if doer == nil {
return []*RecentlyPushedNewBranch{}, nil
@ -486,6 +494,18 @@ func FindRecentlyPushedNewBranches(ctx context.Context, doer *user_model.User, o
opts.MaxCount = 2
}
for _, branch := range branches {
// whether the branch is protected
protected, err := IsBranchProtected(ctx, branch.RepoID, branch.Name)
if err != nil {
return nil, fmt.Errorf("IsBranchProtected: %v", err)
}
if protected {
// Skip protected branches,
// since updates to protected branches often come from PR merges,
// and they are unlikely to be used to create new PRs.
continue
}
// whether branch have already created PR
count, err := db.GetEngine(ctx).Table("pull_request").
// we should not only use branch name here, because if there are branches with same name in other repos,

View File

@ -6,6 +6,7 @@ package git
import (
"context"
"errors"
"fmt"
"strings"
"time"
@ -24,6 +25,7 @@ type LFSLock struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"INDEX NOT NULL"`
OwnerID int64 `xorm:"INDEX NOT NULL"`
Owner *user_model.User `xorm:"-"`
Path string `xorm:"TEXT"`
Created time.Time `xorm:"created"`
}
@ -37,6 +39,35 @@ func (l *LFSLock) BeforeInsert() {
l.Path = util.PathJoinRel(l.Path)
}
// LoadAttributes loads attributes of the lock.
func (l *LFSLock) LoadAttributes(ctx context.Context) error {
// Load owner
if err := l.LoadOwner(ctx); err != nil {
return fmt.Errorf("load owner: %w", err)
}
return nil
}
// LoadOwner loads owner of the lock.
func (l *LFSLock) LoadOwner(ctx context.Context) error {
if l.Owner != nil {
return nil
}
owner, err := user_model.GetUserByID(ctx, l.OwnerID)
if err != nil {
if user_model.IsErrUserNotExist(err) {
l.Owner = user_model.NewGhostUser()
return nil
}
return err
}
l.Owner = owner
return nil
}
// CreateLFSLock creates a new lock.
func CreateLFSLock(ctx context.Context, repo *repo_model.Repository, lock *LFSLock) (*LFSLock, error) {
dbCtx, committer, err := db.TxContext(ctx)
@ -94,7 +125,7 @@ func GetLFSLockByID(ctx context.Context, id int64) (*LFSLock, error) {
}
// GetLFSLockByRepoID returns a list of locks of repository.
func GetLFSLockByRepoID(ctx context.Context, repoID int64, page, pageSize int) ([]*LFSLock, error) {
func GetLFSLockByRepoID(ctx context.Context, repoID int64, page, pageSize int) (LFSLockList, error) {
e := db.GetEngine(ctx)
if page >= 0 && pageSize > 0 {
start := 0
@ -103,7 +134,7 @@ func GetLFSLockByRepoID(ctx context.Context, repoID int64, page, pageSize int) (
}
e.Limit(pageSize, start)
}
lfsLocks := make([]*LFSLock, 0, pageSize)
lfsLocks := make(LFSLockList, 0, pageSize)
return lfsLocks, e.Find(&lfsLocks, &LFSLock{RepoID: repoID})
}

View File

@ -0,0 +1,54 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package git
import (
"context"
"fmt"
"code.gitea.io/gitea/models/db"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
)
// LFSLockList is a list of LFSLock
type LFSLockList []*LFSLock
// LoadAttributes loads the attributes for the given locks
func (locks LFSLockList) LoadAttributes(ctx context.Context) error {
if len(locks) == 0 {
return nil
}
if err := locks.LoadOwner(ctx); err != nil {
return fmt.Errorf("load owner: %w", err)
}
return nil
}
// LoadOwner loads the owner of the locks
func (locks LFSLockList) LoadOwner(ctx context.Context) error {
if len(locks) == 0 {
return nil
}
usersIDs := container.FilterSlice(locks, func(lock *LFSLock) (int64, bool) {
return lock.OwnerID, true
})
users := make(map[int64]*user_model.User, len(usersIDs))
if err := db.GetEngine(ctx).
In("id", usersIDs).
Find(&users); err != nil {
return fmt.Errorf("find users: %w", err)
}
for _, v := range locks {
v.Owner = users[v.OwnerID]
if v.Owner == nil { // not exist
v.Owner = user_model.NewGhostUser()
}
}
return nil
}
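A hedged caller-side sketch of how the new LFSLockList type is meant to be used (git_model is the usual import alias elsewhere in the codebase; error handling is abbreviated):

// Illustrative only: list a repository's LFS locks and resolve all owners with one query.
locks, err := git_model.GetLFSLockByRepoID(ctx, repo.ID, page, pageSize)
if err != nil {
	return err
}
if err := locks.LoadAttributes(ctx); err != nil { // batch-loads owners, falling back to the ghost user
	return err
}
for _, lock := range locks {
	fmt.Println(lock.Path, "locked by", lock.Owner.Name)
}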

View File

@ -222,6 +222,13 @@ func (r RoleInRepo) LocaleHelper(lang translation.Locale) string {
return lang.TrString("repo.issues.role." + string(r) + "_helper")
}
// CommentMetaData stores metadata for a comment; this data will not change once it is inserted into the database
type CommentMetaData struct {
ProjectColumnID int64 `json:"project_column_id,omitempty"`
ProjectColumnTitle string `json:"project_column_title,omitempty"`
ProjectTitle string `json:"project_title,omitempty"`
}
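To make the storage format concrete, here is an illustrative, made-up value and the JSON the new column would hold (a fragment assuming encoding/json and fmt are imported):

// Illustrative only; the values are made up.
md := CommentMetaData{
	ProjectColumnID:    2,
	ProjectColumnTitle: "Done",
	ProjectTitle:       "Roadmap",
}
b, _ := json.Marshal(md)
fmt.Println(string(b))
// {"project_column_id":2,"project_column_title":"Done","project_title":"Roadmap"}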
// Comment represents a comment in commit and issue page.
type Comment struct {
ID int64 `xorm:"pk autoincr"`
@ -295,6 +302,8 @@ type Comment struct {
RefAction references.XRefAction `xorm:"SMALLINT"` // What happens if RefIssueID resolves
RefIsPull bool
CommentMetaData *CommentMetaData `xorm:"JSON TEXT"` // put all non-index metadata in a single field
RefRepo *repo_model.Repository `xorm:"-"`
RefIssue *Issue `xorm:"-"`
RefComment *Comment `xorm:"-"`
@ -797,6 +806,15 @@ func CreateComment(ctx context.Context, opts *CreateCommentOptions) (_ *Comment,
LabelID = opts.Label.ID
}
var commentMetaData *CommentMetaData
if opts.ProjectColumnTitle != "" {
commentMetaData = &CommentMetaData{
ProjectColumnID: opts.ProjectColumnID,
ProjectColumnTitle: opts.ProjectColumnTitle,
ProjectTitle: opts.ProjectTitle,
}
}
comment := &Comment{
Type: opts.Type,
PosterID: opts.Doer.ID,
@ -830,6 +848,7 @@ func CreateComment(ctx context.Context, opts *CreateCommentOptions) (_ *Comment,
RefIsPull: opts.RefIsPull,
IsForcePush: opts.IsForcePush,
Invalidated: opts.Invalidated,
CommentMetaData: commentMetaData,
}
if _, err = e.Insert(comment); err != nil {
return nil, err
@ -987,6 +1006,9 @@ type CreateCommentOptions struct {
MilestoneID int64
OldProjectID int64
ProjectID int64
ProjectTitle string
ProjectColumnID int64
ProjectColumnTitle string
TimeID int64
AssigneeID int64
AssigneeTeamID int64

View File

@ -441,6 +441,7 @@ func (issues IssueList) loadComments(ctx context.Context, cond builder.Cond) (er
Join("INNER", "issue", "issue.id = comment.issue_id").
In("issue.id", issuesIDs[:limit]).
Where(cond).
NoAutoCondition().
Rows(new(Comment))
if err != nil {
return err

View File

@ -165,6 +165,7 @@ type PullRequest struct {
Issue *Issue `xorm:"-"`
Index int64
RequestedReviewers []*user_model.User `xorm:"-"`
RequestedReviewersTeams []*org_model.Team `xorm:"-"`
isRequestedReviewersLoaded bool `xorm:"-"`
HeadRepoID int64 `xorm:"INDEX"`
@ -305,8 +306,29 @@ func (pr *PullRequest) LoadRequestedReviewers(ctx context.Context) error {
}
pr.isRequestedReviewersLoaded = true
for _, review := range reviews {
if review.ReviewerID != 0 {
pr.RequestedReviewers = append(pr.RequestedReviewers, review.Reviewer)
}
}
return nil
}
// LoadRequestedReviewersTeams loads the requested reviewers teams.
func (pr *PullRequest) LoadRequestedReviewersTeams(ctx context.Context) error {
reviews, err := GetReviewsByIssueID(ctx, pr.Issue.ID)
if err != nil {
return err
}
if err = reviews.LoadReviewersTeams(ctx); err != nil {
return err
}
for _, review := range reviews {
if review.ReviewerTeamID != 0 {
pr.RequestedReviewersTeams = append(pr.RequestedReviewersTeams, review.ReviewerTeam)
}
}
return nil
}

View File

@ -214,9 +214,13 @@ func (r *Review) LoadAttributes(ctx context.Context) (err error) {
return err
}
// HTMLTypeColorName returns the color used in the ui indicating the review
func (r *Review) HTMLTypeColorName() string {
switch r.Type {
case ReviewTypeApprove:
if !r.Official {
return "grey"
}
if r.Stale {
return "yellow"
}
@ -231,6 +235,27 @@ func (r *Review) HTMLTypeColorName() string {
return "grey"
}
// TooltipContent returns the locale string describing the review type
func (r *Review) TooltipContent() string {
switch r.Type {
case ReviewTypeApprove:
if r.Stale {
return "repo.issues.review.stale"
}
if !r.Official {
return "repo.issues.review.unofficial"
}
return "repo.issues.review.official"
case ReviewTypeComment:
return "repo.issues.review.comment"
case ReviewTypeReject:
return "repo.issues.review.rejected"
case ReviewTypeRequest:
return "repo.issues.review.requested"
}
return ""
}
// GetReviewByID returns the review by the given ID
func GetReviewByID(ctx context.Context, id int64) (*Review, error) {
review := new(Review)

View File

@ -7,6 +7,7 @@ import (
"context"
"code.gitea.io/gitea/models/db"
organization_model "code.gitea.io/gitea/models/organization"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/optional"
@ -37,6 +38,34 @@ func (reviews ReviewList) LoadReviewers(ctx context.Context) error {
return nil
}
// LoadReviewersTeams loads reviewers teams
func (reviews ReviewList) LoadReviewersTeams(ctx context.Context) error {
reviewersTeamsIDs := make([]int64, 0)
for _, review := range reviews {
if review.ReviewerTeamID != 0 {
reviewersTeamsIDs = append(reviewersTeamsIDs, review.ReviewerTeamID)
}
}
teamsMap := make(map[int64]*organization_model.Team, 0)
for _, teamID := range reviewersTeamsIDs {
team, err := organization_model.GetTeamByID(ctx, teamID)
if err != nil {
return err
}
teamsMap[teamID] = team
}
for _, review := range reviews {
if review.ReviewerTeamID != 0 {
review.ReviewerTeam = teamsMap[review.ReviewerTeamID]
}
}
return nil
}
func (reviews ReviewList) LoadIssues(ctx context.Context) error {
issueIDs := container.FilterSlice(reviews, func(review *Review) (int64, bool) {
return review.IssueID, true

View File

@ -595,6 +595,12 @@ var migrations = []Migration{
NewMigration("Add force-push branch protection support", v1_23.AddForcePushBranchProtection),
// v301 -> v302
NewMigration("Add skip_secondary_authorization option to oauth2 application table", v1_23.AddSkipSecondaryAuthColumnToOAuth2ApplicationTable),
// v302 -> v303
NewMigration("Add index to action_task stopped log_expired", v1_23.AddIndexToActionTaskStoppedLogExpired),
// v303 -> v304
NewMigration("Add metadata column for comment table", v1_23.AddCommentMetaDataColumn),
// v304 -> v305
NewMigration("Add index for release sha1", v1_23.AddIndexForReleaseSha1),
}
// GetCurrentDBVersion returns the current db version

View File

@ -0,0 +1,18 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_23 //nolint
import (
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/xorm"
)
func AddIndexToActionTaskStoppedLogExpired(x *xorm.Engine) error {
type ActionTask struct {
Stopped timeutil.TimeStamp `xorm:"index(stopped_log_expired)"`
LogExpired bool `xorm:"index(stopped_log_expired)"`
}
return x.Sync(new(ActionTask))
}

View File

@ -0,0 +1,23 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_23 //nolint
import (
"xorm.io/xorm"
)
// CommentMetaData stores metadata for a comment; this data will not change once it is inserted into the database
type CommentMetaData struct {
ProjectColumnID int64 `json:"project_column_id"`
ProjectColumnTitle string `json:"project_column_title"`
ProjectTitle string `json:"project_title"`
}
func AddCommentMetaDataColumn(x *xorm.Engine) error {
type Comment struct {
CommentMetaData *CommentMetaData `xorm:"JSON TEXT"` // put all non-index metadata in a single field
}
return x.Sync(new(Comment))
}

View File

@ -0,0 +1,13 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_23 //nolint
import "xorm.io/xorm"
func AddIndexForReleaseSha1(x *xorm.Engine) error {
type Release struct {
Sha1 string `xorm:"INDEX VARCHAR(64)"`
}
return x.Sync(new(Release))
}

View File

@ -76,30 +76,6 @@ func (p *Project) NumOpenIssues(ctx context.Context) int {
return int(c)
}
// MoveIssuesOnProjectColumn moves or keeps issues in a column and sorts them inside that column
func MoveIssuesOnProjectColumn(ctx context.Context, column *Column, sortedIssueIDs map[int64]int64) error {
return db.WithTx(ctx, func(ctx context.Context) error {
sess := db.GetEngine(ctx)
issueIDs := util.ValuesOfMap(sortedIssueIDs)
count, err := sess.Table(new(ProjectIssue)).Where("project_id=?", column.ProjectID).In("issue_id", issueIDs).Count()
if err != nil {
return err
}
if int(count) != len(sortedIssueIDs) {
return fmt.Errorf("all issues have to be added to a project first")
}
for sorting, issueID := range sortedIssueIDs {
_, err = sess.Exec("UPDATE `project_issue` SET project_board_id=?, sorting=? WHERE issue_id=?", column.ID, sorting, issueID)
if err != nil {
return err
}
}
return nil
})
}
func (c *Column) moveIssuesToAnotherColumn(ctx context.Context, newColumn *Column) error {
if c.ProjectID != newColumn.ProjectID {
return fmt.Errorf("columns have to be in the same project")
@ -141,3 +117,9 @@ func (c *Column) moveIssuesToAnotherColumn(ctx context.Context, newColumn *Colum
return nil
})
}
// DeleteAllProjectIssueByIssueIDsAndProjectIDs deletes all project issues that match the given issue IDs and project IDs
func DeleteAllProjectIssueByIssueIDsAndProjectIDs(ctx context.Context, issueIDs, projectIDs []int64) error {
_, err := db.GetEngine(ctx).In("project_id", projectIDs).In("issue_id", issueIDs).Delete(&ProjectIssue{})
return err
}

View File

@ -296,6 +296,12 @@ func GetProjectForRepoByID(ctx context.Context, repoID, id int64) (*Project, err
return p, nil
}
// GetAllProjectsIDsByOwnerIDAndType returns the IDs of all projects owned by the given owner that have the given type
func GetAllProjectsIDsByOwnerIDAndType(ctx context.Context, ownerID int64, projectType Type) ([]int64, error) {
projects := make([]int64, 0)
return projects, db.GetEngine(ctx).Table(&Project{}).Where("owner_id=? AND type=?", ownerID, projectType).Cols("id").Find(&projects)
}
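The two helpers added above (DeleteAllProjectIssueByIssueIDsAndProjectIDs and GetAllProjectsIDsByOwnerIDAndType) look designed to be combined; a hypothetical caller, not taken from this diff, could look like this (project_model is the usual import alias):

// Illustrative only: detach the given issues from every organization-level project of an owner.
func removeIssuesFromOwnerProjects(ctx context.Context, ownerID int64, issueIDs []int64) error {
	projectIDs, err := project_model.GetAllProjectsIDsByOwnerIDAndType(ctx, ownerID, project_model.TypeOrganization)
	if err != nil {
		return err
	}
	if len(projectIDs) == 0 {
		return nil
	}
	return project_model.DeleteAllProjectIssueByIssueIDsAndProjectIDs(ctx, issueIDs, projectIDs)
}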
// UpdateProject updates project properties
func UpdateProject(ctx context.Context, p *Project) error {
if !IsCardTypeValid(p.CardType) {

View File

@ -77,7 +77,7 @@ type Release struct {
Target string
TargetBehind string `xorm:"-"` // to handle non-existing or empty target
Title string
Sha1 string `xorm:"VARCHAR(64)"`
Sha1 string `xorm:"INDEX VARCHAR(64)"`
NumCommits int64
NumCommitsBehind int64 `xorm:"-"`
Note string `xorm:"TEXT"`
@ -398,32 +398,6 @@ func GetReleaseAttachments(ctx context.Context, rels ...*Release) (err error) {
return err
}
type releaseSorter struct {
rels []*Release
}
func (rs *releaseSorter) Len() int {
return len(rs.rels)
}
func (rs *releaseSorter) Less(i, j int) bool {
diffNum := rs.rels[i].NumCommits - rs.rels[j].NumCommits
if diffNum != 0 {
return diffNum > 0
}
return rs.rels[i].CreatedUnix > rs.rels[j].CreatedUnix
}
func (rs *releaseSorter) Swap(i, j int) {
rs.rels[i], rs.rels[j] = rs.rels[j], rs.rels[i]
}
// SortReleases sorts releases by number of commits and created time.
func SortReleases(rels []*Release) {
sorter := &releaseSorter{rels: rels}
sort.Sort(sorter)
}
// UpdateReleasesMigrationsByType updates all migrated repositories' releases from gitServiceType to replace originalAuthorID to posterID
func UpdateReleasesMigrationsByType(ctx context.Context, gitServiceType structs.GitServiceType, originalAuthorID string, posterID int64) error {
_, err := db.GetEngine(ctx).Table("release").
@ -563,3 +537,17 @@ func InsertReleases(ctx context.Context, rels ...*Release) error {
return committer.Commit()
}
func FindTagsByCommitIDs(ctx context.Context, repoID int64, commitIDs ...string) (map[string][]*Release, error) {
releases := make([]*Release, 0, len(commitIDs))
if err := db.GetEngine(ctx).Where("repo_id=?", repoID).
In("sha1", commitIDs).
Find(&releases); err != nil {
return nil, err
}
res := make(map[string][]*Release, len(releases))
for _, r := range releases {
res[r.Sha1] = append(res[r.Sha1], r)
}
return res, nil
}

View File

@ -25,3 +25,16 @@ func TestMigrate_InsertReleases(t *testing.T) {
err := InsertReleases(db.DefaultContext, r)
assert.NoError(t, err)
}
func Test_FindTagsByCommitIDs(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
sha1Rels, err := FindTagsByCommitIDs(db.DefaultContext, 1, "65f1bf27bc3bf70f64657658635e66094edbcb4d")
assert.NoError(t, err)
assert.Len(t, sha1Rels, 1)
rels := sha1Rels["65f1bf27bc3bf70f64657658635e66094edbcb4d"]
assert.Len(t, rels, 3)
assert.Equal(t, "v1.1", rels[0].TagName)
assert.Equal(t, "delete-tag", rels[1].TagName)
assert.Equal(t, "v1.0", rels[2].TagName)
}

View File

@ -5,7 +5,6 @@ package secret
import (
"context"
"errors"
"fmt"
"strings"
@ -22,6 +21,19 @@ import (
)
// Secret represents a secret
//
// It can be:
// 1. org/user level secret, OwnerID is org/user ID and RepoID is 0
// 2. repo level secret, OwnerID is 0 and RepoID is repo ID
//
// Please note that it's not acceptable for both OwnerID and RepoID to be non-zero,
// otherwise it becomes complicated to find secrets belonging to a specific owner.
// For example, conditions like `OwnerID = 1` will also return secret {OwnerID: 1, RepoID: 1},
// but it's a repo level secret, not an org/user level secret.
// To avoid this, make it clear with {OwnerID: 0, RepoID: 1} for repo level secrets.
//
// Please note that it's also not acceptable for both OwnerID and RepoID to be zero: global secrets are not supported.
// This is for security reasons: an admin might not realize that secrets set globally could be read by any user.
type Secret struct {
ID int64
OwnerID int64 `xorm:"INDEX UNIQUE(owner_repo_name) NOT NULL"`
@ -46,6 +58,15 @@ func (err ErrSecretNotFound) Unwrap() error {
// InsertEncryptedSecret Creates, encrypts, and validates a new secret with yet unencrypted data and insert into database
func InsertEncryptedSecret(ctx context.Context, ownerID, repoID int64, name, data string) (*Secret, error) {
if ownerID != 0 && repoID != 0 {
// It's trying to create a secret that belongs to a repository, but OwnerID has been set accidentally.
// Remove OwnerID to avoid confusion; it's not worth returning an error here.
ownerID = 0
}
if ownerID == 0 && repoID == 0 {
return nil, fmt.Errorf("%w: ownerID and repoID cannot be both zero, global secrets are not supported", util.ErrInvalidArgument)
}
encrypted, err := secret_module.EncryptSecret(setting.SecretKey, data)
if err != nil {
return nil, err
@ -56,9 +77,6 @@ func InsertEncryptedSecret(ctx context.Context, ownerID, repoID int64, name, dat
Name: strings.ToUpper(name),
Data: encrypted,
}
if err := secret.Validate(); err != nil {
return secret, err
}
return secret, db.Insert(ctx, secret)
}
@ -66,29 +84,25 @@ func init() {
db.RegisterModel(new(Secret))
}
func (s *Secret) Validate() error {
if s.OwnerID == 0 && s.RepoID == 0 {
return errors.New("the secret is not bound to any scope")
}
return nil
}
type FindSecretsOptions struct {
db.ListOptions
OwnerID int64
RepoID int64
OwnerID int64 // it will be ignored if RepoID is set
SecretID int64
Name string
}
func (opts FindSecretsOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.OwnerID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
if opts.RepoID != 0 { // if RepoID is set
// ignore OwnerID and treat it as 0
cond = cond.And(builder.Eq{"owner_id": 0})
} else {
cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
}
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
}
if opts.SecretID != 0 {
cond = cond.And(builder.Eq{"id": opts.SecretID})
}

View File

@ -15,6 +15,7 @@ import (
"code.gitea.io/gitea/models/dbfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/zstd"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"google.golang.org/protobuf/types/known/timestamppb"
@ -28,6 +29,9 @@ const (
defaultBufSize = MaxLineSize
)
// WriteLogs appends logs to a DBFS file for temporary storage.
// It ignores the format suffix in the filename (like ".zst"), since it's difficult to reopen a closed compressed file and append new content.
// Why not store logs in object storage directly? Because appending content to object storage is inefficient.
func WriteLogs(ctx context.Context, filename string, offset int64, rows []*runnerv1.LogRow) ([]int, error) {
flag := os.O_WRONLY
if offset == 0 {
@ -106,6 +110,17 @@ func ReadLogs(ctx context.Context, inStorage bool, filename string, offset, limi
return rows, nil
}
const (
// logZstdBlockSize is the block size for zstd compression.
// A 128KB block keeps the compression ratio close to that of regular (non-seekable) zstd compression,
// and it means each read from the underlying object storage is at least 128KB*(compression ratio).
// The compression ratio is about 30% for text files, so the actual read size is about 38KB, which should be acceptable.
logZstdBlockSize = 128 * 1024 // 128KB
)
// TransferLogs transfers logs from DBFS to object storage.
// It happens when the file is complete and no more logs will be appended.
// It respects the file format in the filename like ".zst", and compresses the content if needed.
func TransferLogs(ctx context.Context, filename string) (func(), error) {
name := DBFSPrefix + filename
remove := func() {
@ -119,7 +134,26 @@ func TransferLogs(ctx context.Context, filename string) (func(), error) {
}
defer f.Close()
if _, err := storage.Actions.Save(filename, f, -1); err != nil {
var reader io.Reader = f
if strings.HasSuffix(filename, ".zst") {
r, w := io.Pipe()
reader = r
zstdWriter, err := zstd.NewSeekableWriter(w, logZstdBlockSize)
if err != nil {
return nil, fmt.Errorf("zstd NewSeekableWriter: %w", err)
}
go func() {
defer func() {
_ = w.CloseWithError(zstdWriter.Close())
}()
if _, err := io.Copy(zstdWriter, f); err != nil {
_ = w.CloseWithError(err)
return
}
}()
}
if _, err := storage.Actions.Save(filename, reader, -1); err != nil {
return nil, fmt.Errorf("storage save %q: %w", filename, err)
}
return remove, nil
@ -150,11 +184,22 @@ func OpenLogs(ctx context.Context, inStorage bool, filename string) (io.ReadSeek
}
return f, nil
}
f, err := storage.Actions.Open(filename)
if err != nil {
return nil, fmt.Errorf("storage open %q: %w", filename, err)
}
return f, nil
var reader io.ReadSeekCloser = f
if strings.HasSuffix(filename, ".zst") {
r, err := zstd.NewSeekableReader(f)
if err != nil {
return nil, fmt.Errorf("zstd NewSeekableReader: %w", err)
}
reader = r
}
return reader, nil
}
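The seekable zstd writer used here is a Gitea-internal module, but the underlying pattern, compressing on the fly through an io.Pipe while the upload consumes the other end, is generic. A minimal standalone sketch of that pattern using the standard library's gzip, purely for illustration:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// upload stands in for storage.Actions.Save: it just drains the reader.
func upload(r io.Reader) (int64, error) {
	var sink bytes.Buffer
	return io.Copy(&sink, r)
}

func main() {
	src := strings.NewReader(strings.Repeat("log line\n", 1000))

	pr, pw := io.Pipe()
	gz := gzip.NewWriter(pw)
	go func() {
		// Close order matters: flush the compressor first, then close the pipe,
		// propagating any error to the reader side.
		defer func() { _ = pw.CloseWithError(gz.Close()) }()
		if _, err := io.Copy(gz, src); err != nil {
			_ = pw.CloseWithError(err)
		}
	}()

	n, err := upload(pr) // the upload sees compressed bytes as they are produced
	fmt.Println(n, err)
}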
func FormatLog(timestamp time.Time, content string) string {

View File

@ -18,7 +18,7 @@ import (
"code.gitea.io/gitea/modules/proxy"
"code.gitea.io/gitea/modules/setting"
"github.com/go-fed/httpsig"
"github.com/42wim/httpsig"
)
const (

View File

@ -10,7 +10,7 @@ import (
"code.gitea.io/gitea/models/unittest"
user_model "code.gitea.io/gitea/models/user"
_ "code.gitea.io/gitea/models" // https://discourse.gitea.io/t/testfixtures-could-not-clean-table-access-no-such-table-access/4137/4
_ "code.gitea.io/gitea/models" // https://forum.gitea.com/t/testfixtures-could-not-clean-table-access-no-such-table-access/4137/4
"github.com/stretchr/testify/assert"
)

modules/git/batch.go (new file, 46 lines)
View File

@ -0,0 +1,46 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package git
import (
"bufio"
"context"
)
type Batch struct {
cancel context.CancelFunc
Reader *bufio.Reader
Writer WriteCloserError
}
func (repo *Repository) NewBatch(ctx context.Context) (*Batch, error) {
// Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
if err := ensureValidGitRepository(ctx, repo.Path); err != nil {
return nil, err
}
var batch Batch
batch.Writer, batch.Reader, batch.cancel = catFileBatch(ctx, repo.Path)
return &batch, nil
}
func (repo *Repository) NewBatchCheck(ctx context.Context) (*Batch, error) {
// Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
if err := ensureValidGitRepository(ctx, repo.Path); err != nil {
return nil, err
}
var check Batch
check.Writer, check.Reader, check.cancel = catFileBatchCheck(ctx, repo.Path)
return &check, nil
}
func (b *Batch) Close() {
if b.cancel != nil {
b.cancel()
b.Reader = nil
b.Writer = nil
b.cancel = nil
}
}
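A hedged sketch of the intended lifecycle of the new Batch type (a caller-side fragment, error returns abbreviated): open it once, write object names to Writer, read replies from Reader, and Close it to terminate the underlying git cat-file process.

// Illustrative only: query an object's header through a dedicated batch.
batch, err := repo.NewBatch(ctx)
if err != nil {
	return err
}
defer batch.Close() // terminates the cat-file process and releases the pipes

if _, err := batch.Writer.Write([]byte(commitID + "\n")); err != nil {
	return err
}
header, err := batch.Reader.ReadString('\n') // "<sha> <type> <size>"
if err != nil {
	return err
}
fmt.Print(header)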

View File

@ -26,10 +26,10 @@ type WriteCloserError interface {
CloseWithError(err error) error
}
// EnsureValidGitRepository runs git rev-parse in the repository path - thus ensuring that the repository is a valid repository.
// ensureValidGitRepository runs git rev-parse in the repository path - thus ensuring that the repository is a valid repository.
// Run before opening git cat-file.
// This is needed otherwise the git cat-file will hang for invalid repositories.
func EnsureValidGitRepository(ctx context.Context, repoPath string) error {
func ensureValidGitRepository(ctx context.Context, repoPath string) error {
stderr := strings.Builder{}
err := NewCommand(ctx, "rev-parse").
SetDescription(fmt.Sprintf("%s rev-parse [repo_path: %s]", GitExecutable, repoPath)).
@ -43,8 +43,8 @@ func EnsureValidGitRepository(ctx context.Context, repoPath string) error {
return nil
}
// CatFileBatchCheck opens git cat-file --batch-check in the provided repo and returns a stdin pipe, a stdout reader and cancel function
func CatFileBatchCheck(ctx context.Context, repoPath string) (WriteCloserError, *bufio.Reader, func()) {
// catFileBatchCheck opens git cat-file --batch-check in the provided repo and returns a stdin pipe, a stdout reader and cancel function
func catFileBatchCheck(ctx context.Context, repoPath string) (WriteCloserError, *bufio.Reader, func()) {
batchStdinReader, batchStdinWriter := io.Pipe()
batchStdoutReader, batchStdoutWriter := io.Pipe()
ctx, ctxCancel := context.WithCancel(ctx)
@ -93,8 +93,8 @@ func CatFileBatchCheck(ctx context.Context, repoPath string) (WriteCloserError,
return batchStdinWriter, batchReader, cancel
}
// CatFileBatch opens git cat-file --batch in the provided repo and returns a stdin pipe, a stdout reader and cancel function
func CatFileBatch(ctx context.Context, repoPath string) (WriteCloserError, *bufio.Reader, func()) {
// catFileBatch opens git cat-file --batch in the provided repo and returns a stdin pipe, a stdout reader and cancel function
func catFileBatch(ctx context.Context, repoPath string) (WriteCloserError, *bufio.Reader, func()) {
// We often want to feed the commits in order into cat-file --batch, followed by their trees and sub trees as necessary.
// so let's create a batch stdin and stdout
batchStdinReader, batchStdinWriter := io.Pipe()

View File

@ -14,6 +14,11 @@ func TestReadingBlameOutputSha256(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if isGogit {
t.Skip("Skipping test since gogit does not support sha256")
return
}
t.Run("Without .git-blame-ignore-revs", func(t *testing.T) {
repo, err := OpenRepository(ctx, "./tests/repos/repo5_pulls_sha256")
assert.NoError(t, err)

View File

@ -26,9 +26,12 @@ type Blob struct {
// DataAsync gets a ReadCloser for the contents of a blob without reading it all.
// Calling the Close function on the result will discard all unread output.
func (b *Blob) DataAsync() (io.ReadCloser, error) {
wr, rd, cancel := b.repo.CatFileBatch(b.repo.Ctx)
wr, rd, cancel, err := b.repo.CatFileBatch(b.repo.Ctx)
if err != nil {
return nil, err
}
_, err := wr.Write([]byte(b.ID.String() + "\n"))
_, err = wr.Write([]byte(b.ID.String() + "\n"))
if err != nil {
cancel()
return nil, err
@ -64,9 +67,13 @@ func (b *Blob) Size() int64 {
return b.size
}
wr, rd, cancel := b.repo.CatFileBatchCheck(b.repo.Ctx)
wr, rd, cancel, err := b.repo.CatFileBatchCheck(b.repo.Ctx)
if err != nil {
log.Debug("error whilst reading size for %s in %s. Error: %v", b.ID.String(), b.repo.Path, err)
return 0
}
defer cancel()
_, err := wr.Write([]byte(b.ID.String() + "\n"))
_, err = wr.Write([]byte(b.ID.String() + "\n"))
if err != nil {
log.Debug("error whilst reading size for %s in %s. Error: %v", b.ID.String(), b.repo.Path, err)
return 0

View File

@ -124,7 +124,10 @@ func GetLastCommitForPaths(ctx context.Context, commit *Commit, treePath string,
return nil, err
}
batchStdinWriter, batchReader, cancel := commit.repo.CatFileBatch(ctx)
batchStdinWriter, batchReader, cancel, err := commit.repo.CatFileBatch(ctx)
if err != nil {
return nil, err
}
defer cancel()
commitsMap := map[string]*Commit{}

View File

@ -4,6 +4,8 @@
package git
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
@ -345,3 +347,18 @@ func TestGetCommitFileStatusMerges(t *testing.T) {
assert.Equal(t, commitFileStatus.Removed, expected.Removed)
assert.Equal(t, commitFileStatus.Modified, expected.Modified)
}
func Test_GetCommitBranchStart(t *testing.T) {
bareRepo1Path := filepath.Join(testReposDir, "repo1_bare")
repo, err := OpenRepository(context.Background(), bareRepo1Path)
assert.NoError(t, err)
defer repo.Close()
commit, err := repo.GetBranchCommit("branch1")
assert.NoError(t, err)
assert.EqualValues(t, "2839944139e0de9737a044f78b0e4b40d989a9e3", commit.ID.String())
startCommitID, err := repo.GetCommitBranchStart(os.Environ(), "branch1", commit.ID.String())
assert.NoError(t, err)
assert.NotEmpty(t, startCommitID)
assert.EqualValues(t, "9c9aef8dd84e02bc7ec12641deb4c930a7c30185", startCommitID)
}

View File

@ -271,7 +271,17 @@ func CutDiffAroundLine(originalDiff io.Reader, line int64, old bool, numbersOfLi
}
// GetAffectedFiles returns the affected files between two commits
func GetAffectedFiles(repo *Repository, oldCommitID, newCommitID string, env []string) ([]string, error) {
func GetAffectedFiles(repo *Repository, branchName, oldCommitID, newCommitID string, env []string) ([]string, error) {
if oldCommitID == emptySha1ObjectID.String() || oldCommitID == emptySha256ObjectID.String() {
startCommitID, err := repo.GetCommitBranchStart(env, branchName, newCommitID)
if err != nil {
return nil, err
}
if startCommitID == "" {
return nil, fmt.Errorf("cannot find the start commit of %s", newCommitID)
}
oldCommitID = startCommitID
}
stdoutReader, stdoutWriter, err := os.Pipe()
if err != nil {
log.Error("Unable to create os.Pipe for %s", repo.Path)

View File

@ -46,7 +46,10 @@ func FindLFSFile(repo *git.Repository, objectID git.ObjectID) ([]*LFSResult, err
// Next feed the commits in order into cat-file --batch, followed by their trees and sub trees as necessary.
// so let's create a batch stdin and stdout
batchStdinWriter, batchReader, cancel := repo.CatFileBatch(repo.Ctx)
batchStdinWriter, batchReader, cancel, err := repo.CatFileBatch(repo.Ctx)
if err != nil {
return nil, err
}
defer cancel()
// We'll use a scanner for the revList because it's simpler than a bufio.Reader

View File

@ -26,14 +26,10 @@ type Repository struct {
gpgSettings *GPGSettings
batchInUse bool
batchCancel context.CancelFunc
batchReader *bufio.Reader
batchWriter WriteCloserError
batch *Batch
checkInUse bool
checkCancel context.CancelFunc
checkReader *bufio.Reader
checkWriter WriteCloserError
check *Batch
Ctx context.Context
LastCommitCache *LastCommitCache
@ -55,63 +51,75 @@ func OpenRepository(ctx context.Context, repoPath string) (*Repository, error) {
return nil, util.NewNotExistErrorf("no such file or directory")
}
// Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
if err := EnsureValidGitRepository(ctx, repoPath); err != nil {
return nil, err
}
repo := &Repository{
return &Repository{
Path: repoPath,
tagCache: newObjectCache(),
Ctx: ctx,
}
repo.batchWriter, repo.batchReader, repo.batchCancel = CatFileBatch(ctx, repoPath)
repo.checkWriter, repo.checkReader, repo.checkCancel = CatFileBatchCheck(ctx, repoPath)
return repo, nil
}, nil
}
// CatFileBatch obtains a CatFileBatch for this repository
func (repo *Repository) CatFileBatch(ctx context.Context) (WriteCloserError, *bufio.Reader, func()) {
if repo.batchCancel == nil || repo.batchInUse {
log.Debug("Opening temporary cat file batch for: %s", repo.Path)
return CatFileBatch(ctx, repo.Path)
}
repo.batchInUse = true
return repo.batchWriter, repo.batchReader, func() {
repo.batchInUse = false
func (repo *Repository) CatFileBatch(ctx context.Context) (WriteCloserError, *bufio.Reader, func(), error) {
if repo.batch == nil {
var err error
repo.batch, err = repo.NewBatch(ctx)
if err != nil {
return nil, nil, nil, err
}
}
if !repo.batchInUse {
repo.batchInUse = true
return repo.batch.Writer, repo.batch.Reader, func() {
repo.batchInUse = false
}, nil
}
log.Debug("Opening temporary cat file batch for: %s", repo.Path)
tempBatch, err := repo.NewBatch(ctx)
if err != nil {
return nil, nil, nil, err
}
return tempBatch.Writer, tempBatch.Reader, tempBatch.Close, nil
}
// CatFileBatchCheck obtains a CatFileBatchCheck for this repository
func (repo *Repository) CatFileBatchCheck(ctx context.Context) (WriteCloserError, *bufio.Reader, func()) {
if repo.checkCancel == nil || repo.checkInUse {
log.Debug("Opening temporary cat file batch-check for: %s", repo.Path)
return CatFileBatchCheck(ctx, repo.Path)
func (repo *Repository) CatFileBatchCheck(ctx context.Context) (WriteCloserError, *bufio.Reader, func(), error) {
if repo.check == nil {
var err error
repo.check, err = repo.NewBatchCheck(ctx)
if err != nil {
return nil, nil, nil, err
}
}
if !repo.checkInUse {
repo.checkInUse = true
return repo.checkWriter, repo.checkReader, func() {
return repo.check.Writer, repo.check.Reader, func() {
repo.checkInUse = false
}, nil
}
log.Debug("Opening temporary cat file batch-check for: %s", repo.Path)
tempBatchCheck, err := repo.NewBatchCheck(ctx)
if err != nil {
return nil, nil, nil, err
}
return tempBatchCheck.Writer, tempBatchCheck.Reader, tempBatchCheck.Close, nil
}
func (repo *Repository) Close() error {
if repo == nil {
return nil
}
if repo.batchCancel != nil {
repo.batchCancel()
repo.batchReader = nil
repo.batchWriter = nil
repo.batchCancel = nil
if repo.batch != nil {
repo.batch.Close()
repo.batch = nil
repo.batchInUse = false
}
if repo.checkCancel != nil {
repo.checkCancel()
repo.checkCancel = nil
repo.checkReader = nil
repo.checkWriter = nil
if repo.check != nil {
repo.check.Close()
repo.check = nil
repo.checkInUse = false
}
repo.LastCommitCache = nil

View File

@ -14,30 +14,35 @@ import (
"github.com/go-git/go-git/v5/plumbing/storer"
)
// IsObjectExist returns true if given reference exists in the repository.
// IsObjectExist returns true if the given object exists in the repository.
// FIXME: Inconsistent behavior with nogogit edition
// Unlike the implementation of IsObjectExist in nogogit edition, it does not support short hashes here.
// For example, IsObjectExist("153f451") will return false, but it will return true in nogogit edition.
// To fix this, the solution could be adding support for short hashes in gogit edition if it's really needed.
func (repo *Repository) IsObjectExist(name string) bool {
if name == "" {
return false
}
_, err := repo.gogitRepo.Object(plumbing.AnyObject, plumbing.NewHash(name))
return err == nil
}
// IsReferenceExist returns true if the given reference exists in the repository.
// FIXME: Inconsistent behavior with nogogit edition
// Unlike the implementation of IsReferenceExist in nogogit edition, it does not support blob hashes here.
// For example, IsReferenceExist([existing_blob_hash]) will return false, but it will return true in nogogit edition.
// To fix this, the solution could be refusing to support blob hashes in nogogit edition since a blob hash is not a reference.
func (repo *Repository) IsReferenceExist(name string) bool {
if name == "" {
return false
}
_, err := repo.gogitRepo.ResolveRevision(plumbing.Revision(name))
return err == nil
}
// IsReferenceExist returns true if given reference exists in the repository.
func (repo *Repository) IsReferenceExist(name string) bool {
if name == "" {
return false
}
reference, err := repo.gogitRepo.Reference(plumbing.ReferenceName(name), true)
if err != nil {
return false
}
return reference.Type() != plumbing.InvalidReference
}
// IsBranchExist returns true if given branch exists in current repository.
func (repo *Repository) IsBranchExist(name string) bool {
if name == "" {

View File

@ -16,15 +16,19 @@ import (
"code.gitea.io/gitea/modules/log"
)
// IsObjectExist returns true if given reference exists in the repository.
// IsObjectExist returns true if the given object exists in the repository.
func (repo *Repository) IsObjectExist(name string) bool {
if name == "" {
return false
}
wr, rd, cancel := repo.CatFileBatchCheck(repo.Ctx)
wr, rd, cancel, err := repo.CatFileBatchCheck(repo.Ctx)
if err != nil {
log.Debug("Error writing to CatFileBatchCheck %v", err)
return false
}
defer cancel()
_, err := wr.Write([]byte(name + "\n"))
_, err = wr.Write([]byte(name + "\n"))
if err != nil {
log.Debug("Error writing to CatFileBatchCheck %v", err)
return false
@ -39,9 +43,13 @@ func (repo *Repository) IsReferenceExist(name string) bool {
return false
}
wr, rd, cancel := repo.CatFileBatchCheck(repo.Ctx)
wr, rd, cancel, err := repo.CatFileBatchCheck(repo.Ctx)
if err != nil {
log.Debug("Error writing to CatFileBatchCheck %v", err)
return false
}
defer cancel()
_, err := wr.Write([]byte(name + "\n"))
_, err = wr.Write([]byte(name + "\n"))
if err != nil {
log.Debug("Error writing to CatFileBatchCheck %v", err)
return false

View File

@ -8,6 +8,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestRepository_GetBranches(t *testing.T) {
@ -94,3 +95,107 @@ func BenchmarkGetRefsBySha(b *testing.B) {
_, _ = bareRepo5.GetRefsBySha("c83380d7056593c51a699d12b9c00627bd5743e9", "")
_, _ = bareRepo5.GetRefsBySha("58a4bcc53ac13e7ff76127e0fb518b5262bf09af", "")
}
func TestRepository_IsObjectExist(t *testing.T) {
repo, err := openRepositoryWithDefaultContext(filepath.Join(testReposDir, "repo1_bare"))
require.NoError(t, err)
defer repo.Close()
// FIXME: Inconsistent behavior between gogit and nogogit editions
// See the comment of IsObjectExist in gogit edition for more details.
supportShortHash := !isGogit
tests := []struct {
name string
arg string
want bool
}{
{
name: "empty",
arg: "",
want: false,
},
{
name: "branch",
arg: "master",
want: false,
},
{
name: "commit hash",
arg: "ce064814f4a0d337b333e646ece456cd39fab612",
want: true,
},
{
name: "short commit hash",
arg: "ce06481",
want: supportShortHash,
},
{
name: "blob hash",
arg: "153f451b9ee7fa1da317ab17a127e9fd9d384310",
want: true,
},
{
name: "short blob hash",
arg: "153f451",
want: supportShortHash,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, repo.IsObjectExist(tt.arg))
})
}
}
func TestRepository_IsReferenceExist(t *testing.T) {
repo, err := openRepositoryWithDefaultContext(filepath.Join(testReposDir, "repo1_bare"))
require.NoError(t, err)
defer repo.Close()
// FIXME: Inconsistent behavior between gogit and nogogit editions
// See the comment of IsReferenceExist in gogit edition for more details.
supportBlobHash := !isGogit
tests := []struct {
name string
arg string
want bool
}{
{
name: "empty",
arg: "",
want: false,
},
{
name: "branch",
arg: "master",
want: true,
},
{
name: "commit hash",
arg: "ce064814f4a0d337b333e646ece456cd39fab612",
want: true,
},
{
name: "short commit hash",
arg: "ce06481",
want: true,
},
{
name: "blob hash",
arg: "153f451b9ee7fa1da317ab17a127e9fd9d384310",
want: supportBlobHash,
},
{
name: "short blob hash",
arg: "153f451",
want: supportBlobHash,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, repo.IsReferenceExist(tt.arg))
})
}
}

View File

@ -7,6 +7,7 @@ package git
import (
"bytes"
"io"
"os"
"strconv"
"strings"
@ -414,7 +415,7 @@ func (repo *Repository) commitsBefore(id ObjectID, limit int) ([]*Commit, error)
commits := make([]*Commit, 0, len(formattedLog))
for _, commit := range formattedLog {
branches, err := repo.getBranches(commit, 2)
branches, err := repo.getBranches(os.Environ(), commit.ID.String(), 2)
if err != nil {
return nil, err
}
@ -437,12 +438,15 @@ func (repo *Repository) getCommitsBeforeLimit(id ObjectID, num int) ([]*Commit,
return repo.commitsBefore(id, num)
}
func (repo *Repository) getBranches(commit *Commit, limit int) ([]string, error) {
func (repo *Repository) getBranches(env []string, commitID string, limit int) ([]string, error) {
if DefaultFeatures().CheckVersionAtLeast("2.7.0") {
stdout, _, err := NewCommand(repo.Ctx, "for-each-ref", "--format=%(refname:strip=2)").
AddOptionFormat("--count=%d", limit).
AddOptionValues("--contains", commit.ID.String(), BranchPrefix).
RunStdString(&RunOpts{Dir: repo.Path})
AddOptionValues("--contains", commitID, BranchPrefix).
RunStdString(&RunOpts{
Dir: repo.Path,
Env: env,
})
if err != nil {
return nil, err
}
@ -451,7 +455,10 @@ func (repo *Repository) getBranches(commit *Commit, limit int) ([]string, error)
return branches, nil
}
stdout, _, err := NewCommand(repo.Ctx, "branch").AddOptionValues("--contains", commit.ID.String()).RunStdString(&RunOpts{Dir: repo.Path})
stdout, _, err := NewCommand(repo.Ctx, "branch").AddOptionValues("--contains", commitID).RunStdString(&RunOpts{
Dir: repo.Path,
Env: env,
})
if err != nil {
return nil, err
}
@ -513,3 +520,35 @@ func (repo *Repository) AddLastCommitCache(cacheKey, fullName, sha string) error
}
return nil
}
func (repo *Repository) GetCommitBranchStart(env []string, branch, endCommitID string) (string, error) {
cmd := NewCommand(repo.Ctx, "log", prettyLogFormat)
cmd.AddDynamicArguments(endCommitID)
stdout, _, runErr := cmd.RunStdBytes(&RunOpts{
Dir: repo.Path,
Env: env,
})
if runErr != nil {
return "", runErr
}
parts := bytes.Split(bytes.TrimSpace(stdout), []byte{'\n'})
var startCommitID string
for _, commitID := range parts {
branches, err := repo.getBranches(env, string(commitID), 2)
if err != nil {
return "", err
}
for _, b := range branches {
if b != branch {
return startCommitID, nil
}
}
startCommitID = string(commitID)
}
return "", nil
}

View File

@ -33,9 +33,12 @@ func (repo *Repository) ResolveReference(name string) (string, error) {
// GetRefCommitID returns the last commit ID string of given reference (branch or tag).
func (repo *Repository) GetRefCommitID(name string) (string, error) {
wr, rd, cancel := repo.CatFileBatchCheck(repo.Ctx)
wr, rd, cancel, err := repo.CatFileBatchCheck(repo.Ctx)
if err != nil {
return "", err
}
defer cancel()
_, err := wr.Write([]byte(name + "\n"))
_, err = wr.Write([]byte(name + "\n"))
if err != nil {
return "", err
}
@ -61,12 +64,19 @@ func (repo *Repository) RemoveReference(name string) error {
// IsCommitExist returns true if given commit exists in current repository.
func (repo *Repository) IsCommitExist(name string) bool {
if err := ensureValidGitRepository(repo.Ctx, repo.Path); err != nil {
log.Error("IsCommitExist: %v", err)
return false
}
_, _, err := NewCommand(repo.Ctx, "cat-file", "-e").AddDynamicArguments(name).RunStdString(&RunOpts{Dir: repo.Path})
return err == nil
}
func (repo *Repository) getCommit(id ObjectID) (*Commit, error) {
wr, rd, cancel := repo.CatFileBatch(repo.Ctx)
wr, rd, cancel, err := repo.CatFileBatch(repo.Ctx)
if err != nil {
return nil, err
}
defer cancel()
_, _ = wr.Write([]byte(id.String() + "\n"))
@ -143,7 +153,10 @@ func (repo *Repository) ConvertToGitID(commitID string) (ObjectID, error) {
}
}
wr, rd, cancel := repo.CatFileBatchCheck(repo.Ctx)
wr, rd, cancel, err := repo.CatFileBatchCheck(repo.Ctx)
if err != nil {
return nil, err
}
defer cancel()
_, err = wr.Write([]byte(commitID + "\n"))
if err != nil {

View File

@ -4,6 +4,7 @@
package git
import (
"os"
"path/filepath"
"testing"
@ -31,7 +32,7 @@ func TestRepository_GetCommitBranches(t *testing.T) {
for _, testCase := range testCases {
commit, err := bareRepo1.GetCommit(testCase.CommitID)
assert.NoError(t, err)
branches, err := bareRepo1.getBranches(commit, 2)
branches, err := bareRepo1.getBranches(os.Environ(), commit.ID.String(), 2)
assert.NoError(t, err)
assert.Equal(t, testCase.ExpectedBranches, branches)
}

View File

@ -20,7 +20,10 @@ import (
func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, error) {
// We will feed the commit IDs in order into cat-file --batch, followed by blobs as necessary.
// so let's create a batch stdin and stdout
batchStdinWriter, batchReader, cancel := repo.CatFileBatch(repo.Ctx)
batchStdinWriter, batchReader, cancel, err := repo.CatFileBatch(repo.Ctx)
if err != nil {
return nil, err
}
defer cancel()
writeID := func(id string) error {

View File

@ -31,9 +31,12 @@ func (repo *Repository) GetTags(skip, limit int) (tags []string, err error) {
// GetTagType gets the type of the tag, either commit (simple) or tag (annotated)
func (repo *Repository) GetTagType(id ObjectID) (string, error) {
wr, rd, cancel := repo.CatFileBatchCheck(repo.Ctx)
wr, rd, cancel, err := repo.CatFileBatchCheck(repo.Ctx)
if err != nil {
return "", err
}
defer cancel()
_, err := wr.Write([]byte(id.String() + "\n"))
_, err = wr.Write([]byte(id.String() + "\n"))
if err != nil {
return "", err
}
@ -89,7 +92,10 @@ func (repo *Repository) getTag(tagID ObjectID, name string) (*Tag, error) {
}
// The tag is an annotated tag with a message.
wr, rd, cancel := repo.CatFileBatch(repo.Ctx)
wr, rd, cancel, err := repo.CatFileBatch(repo.Ctx)
if err != nil {
return nil, err
}
defer cancel()
if _, err := wr.Write([]byte(tagID.String() + "\n")); err != nil {

View File

@ -6,11 +6,20 @@
package git
import "github.com/go-git/go-git/v5/plumbing"
import (
"errors"
"github.com/go-git/go-git/v5/plumbing"
)
func (repo *Repository) getTree(id ObjectID) (*Tree, error) {
gogitTree, err := repo.gogitRepo.TreeObject(plumbing.Hash(id.RawValue()))
if err != nil {
if errors.Is(err, plumbing.ErrObjectNotFound) {
return nil, ErrNotExist{
ID: id.String(),
}
}
return nil, err
}

View File

@ -10,7 +10,10 @@ import (
)
func (repo *Repository) getTree(id ObjectID) (*Tree, error) {
wr, rd, cancel := repo.CatFileBatch(repo.Ctx)
wr, rd, cancel, err := repo.CatFileBatch(repo.Ctx)
if err != nil {
return nil, err
}
defer cancel()
_, _ = wr.Write([]byte(id.String() + "\n"))

View File

@ -42,9 +42,13 @@ func (te *TreeEntry) Size() int64 {
return te.size
}
wr, rd, cancel := te.ptree.repo.CatFileBatchCheck(te.ptree.repo.Ctx)
wr, rd, cancel, err := te.ptree.repo.CatFileBatchCheck(te.ptree.repo.Ctx)
if err != nil {
log.Debug("error whilst reading size for %s in %s. Error: %v", te.ID.String(), te.ptree.repo.Path, err)
return 0
}
defer cancel()
_, err := wr.Write([]byte(te.ID.String() + "\n"))
_, err = wr.Write([]byte(te.ID.String() + "\n"))
if err != nil {
log.Debug("error whilst reading size for %s in %s. Error: %v", te.ID.String(), te.ptree.repo.Path, err)
return 0

View File

@ -33,7 +33,10 @@ func (t *Tree) ListEntries() (Entries, error) {
}
if t.repo != nil {
wr, rd, cancel := t.repo.CatFileBatch(t.repo.Ctx)
wr, rd, cancel, err := t.repo.CatFileBatch(t.repo.Ctx)
if err != nil {
return nil, err
}
defer cancel()
_, _ = wr.Write([]byte(t.ID.String() + "\n"))

View File

@ -0,0 +1,74 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package globallock
import (
"context"
"sync"
"code.gitea.io/gitea/modules/setting"
)
var (
defaultLocker Locker
initOnce sync.Once
initFunc = func() {
switch setting.GlobalLock.ServiceType {
case "redis":
defaultLocker = NewRedisLocker(setting.GlobalLock.ServiceConnStr)
case "memory":
fallthrough
default:
defaultLocker = NewMemoryLocker()
}
} // define initFunc as a variable to make it possible to change it in tests
)
// DefaultLocker returns the default locker.
func DefaultLocker() Locker {
initOnce.Do(func() {
initFunc()
})
return defaultLocker
}
// Lock tries to acquire a lock for the given key; it uses the default locker.
// Read the documentation of Locker.Lock for more information about the behavior.
func Lock(ctx context.Context, key string) (ReleaseFunc, error) {
return DefaultLocker().Lock(ctx, key)
}
// TryLock tries to acquire a lock for the given key; it uses the default locker.
// Read the documentation of Locker.TryLock for more information about the behavior.
func TryLock(ctx context.Context, key string) (bool, ReleaseFunc, error) {
return DefaultLocker().TryLock(ctx, key)
}
// LockAndDo tries to acquire a lock for the given key and then calls the given function.
// It uses the default locker, and it returns an error if it fails to acquire the lock.
func LockAndDo(ctx context.Context, key string, f func(context.Context) error) error {
release, err := Lock(ctx, key)
if err != nil {
return err
}
defer release()
return f(ctx)
}
// TryLockAndDo tries to acquire a lock for the given key and then calls the given function.
// It uses the default locker, and it returns false if it fails to acquire the lock.
func TryLockAndDo(ctx context.Context, key string, f func(context.Context) error) (bool, error) {
ok, release, err := TryLock(ctx, key)
if err != nil {
return false, err
}
defer release()
if !ok {
return false, nil
}
return true, f(ctx)
}
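
A minimal usage sketch for the package-level helpers above, assuming a hypothetical caller; the key format ("mirror:<id>") and the work inside the callback are illustrative, not part of this change:

package example

import (
	"context"
	"fmt"

	"code.gitea.io/gitea/modules/globallock"
)

// updateMirror is a hypothetical caller: it serializes work on one repository
// across the whole instance by locking on a per-repository key.
func updateMirror(ctx context.Context, repoID int64) error {
	return globallock.LockAndDo(ctx, fmt.Sprintf("mirror:%d", repoID), func(ctx context.Context) error {
		// work that must not run concurrently for this key
		return nil
	})
}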

View File

@ -0,0 +1,96 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package globallock
import (
"context"
"os"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLockAndDo(t *testing.T) {
t.Run("redis", func(t *testing.T) {
url := "redis://127.0.0.1:6379/0"
if os.Getenv("CI") == "" {
// Make it possible to run tests against a local redis instance
url = os.Getenv("TEST_REDIS_URL")
if url == "" {
t.Skip("TEST_REDIS_URL not set and not running in CI")
return
}
}
oldDefaultLocker := defaultLocker
oldInitFunc := initFunc
defer func() {
defaultLocker = oldDefaultLocker
initFunc = oldInitFunc
if defaultLocker == nil {
initOnce = sync.Once{}
}
}()
initOnce = sync.Once{}
initFunc = func() {
defaultLocker = NewRedisLocker(url)
}
testLockAndDo(t)
require.NoError(t, defaultLocker.(*redisLocker).Close())
})
t.Run("memory", func(t *testing.T) {
oldDefaultLocker := defaultLocker
oldInitFunc := initFunc
defer func() {
defaultLocker = oldDefaultLocker
initFunc = oldInitFunc
if defaultLocker == nil {
initOnce = sync.Once{}
}
}()
initOnce = sync.Once{}
initFunc = func() {
defaultLocker = NewMemoryLocker()
}
testLockAndDo(t)
})
}
func testLockAndDo(t *testing.T) {
const concurrency = 1000
ctx := context.Background()
count := 0
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
go func() {
defer wg.Done()
err := LockAndDo(ctx, "test", func(ctx context.Context) error {
count++
// It's impossible to acquire the lock inside the function
ok, err := TryLockAndDo(ctx, "test", func(ctx context.Context) error {
assert.Fail(t, "should not acquire the lock")
return nil
})
assert.False(t, ok)
assert.NoError(t, err)
return nil
})
require.NoError(t, err)
}()
}
wg.Wait()
assert.Equal(t, concurrency, count)
}

View File

@ -0,0 +1,38 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package globallock
import (
"context"
)
type Locker interface {
// Lock tries to acquire a lock for the given key; it blocks until the lock is acquired or the context is canceled.
//
// Lock returns a ReleaseFunc to release the lock; it is never nil.
// It's always safe to call this function even if it fails to acquire the lock, and it will do nothing in that case.
// And it's also safe to call it multiple times, but it will only release the lock once.
// That's why it's called ReleaseFunc, not UnlockFunc.
// But be aware that never calling it is not safe; it could lead to a memory leak.
// So a recommended pattern is to use defer to call it:
// release, err := locker.Lock(ctx, "key")
// if err != nil {
// return err
// }
// defer release()
//
// Lock returns an error if it fails to acquire the lock.
// Be aware that even if the context is not canceled, it's still possible to fail to acquire the lock,
// for example, if redis is down or the maximum number of tries has been reached.
Lock(ctx context.Context, key string) (ReleaseFunc, error)
// TryLock tries to acquire a lock for the given key and returns immediately.
// It follows the same pattern as Lock, but it doesn't block.
// If it fails to acquire the lock because the key is already locked (not for other reasons, such as redis being down),
// it returns false without any error.
TryLock(ctx context.Context, key string) (bool, ReleaseFunc, error)
}
// ReleaseFunc is a function that releases a lock.
type ReleaseFunc func()
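
A hedged sketch of the non-blocking TryLock pattern described in the comments above; the locker argument, key name, and skipped work are assumptions for illustration:

package example

import (
	"context"

	"code.gitea.io/gitea/modules/globallock"
)

// tryCleanup is a hypothetical caller: it skips the work when the lock is busy.
func tryCleanup(ctx context.Context, locker globallock.Locker) error {
	ok, release, err := locker.TryLock(ctx, "cleanup")
	if err != nil {
		return err
	}
	// release is never nil and is safe to call even when ok is false.
	defer release()
	if !ok {
		return nil // another holder owns the lock; skip this round
	}
	// ... do the work while holding the lock ...
	return nil
}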

View File

@ -0,0 +1,181 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package globallock
import (
"context"
"os"
"sync"
"testing"
"time"
"github.com/go-redsync/redsync/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLocker(t *testing.T) {
t.Run("redis", func(t *testing.T) {
url := "redis://127.0.0.1:6379/0"
if os.Getenv("CI") == "" {
// Make it possible to run tests against a local redis instance
url = os.Getenv("TEST_REDIS_URL")
if url == "" {
t.Skip("TEST_REDIS_URL not set and not running in CI")
return
}
}
oldExpiry := redisLockExpiry
redisLockExpiry = 5 * time.Second // make it shorter for testing
defer func() {
redisLockExpiry = oldExpiry
}()
locker := NewRedisLocker(url)
testLocker(t, locker)
testRedisLocker(t, locker.(*redisLocker))
require.NoError(t, locker.(*redisLocker).Close())
})
t.Run("memory", func(t *testing.T) {
locker := NewMemoryLocker()
testLocker(t, locker)
testMemoryLocker(t, locker.(*memoryLocker))
})
}
func testLocker(t *testing.T, locker Locker) {
t.Run("lock", func(t *testing.T) {
parentCtx := context.Background()
release, err := locker.Lock(parentCtx, "test")
defer release()
assert.NoError(t, err)
func() {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
release, err := locker.Lock(ctx, "test")
defer release()
assert.Error(t, err)
}()
release()
func() {
release, err := locker.Lock(context.Background(), "test")
defer release()
assert.NoError(t, err)
}()
})
t.Run("try lock", func(t *testing.T) {
parentCtx := context.Background()
ok, release, err := locker.TryLock(parentCtx, "test")
defer release()
assert.True(t, ok)
assert.NoError(t, err)
func() {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
ok, release, err := locker.TryLock(ctx, "test")
defer release()
assert.False(t, ok)
assert.NoError(t, err)
}()
release()
func() {
ok, release, _ := locker.TryLock(context.Background(), "test")
defer release()
assert.True(t, ok)
}()
})
t.Run("wait and acquired", func(t *testing.T) {
ctx := context.Background()
release, err := locker.Lock(ctx, "test")
require.NoError(t, err)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
started := time.Now()
release, err := locker.Lock(context.Background(), "test") // should be blocked for seconds
defer release()
assert.Greater(t, time.Since(started), time.Second)
assert.NoError(t, err)
}()
time.Sleep(2 * time.Second)
release()
wg.Wait()
})
t.Run("multiple release", func(t *testing.T) {
ctx := context.Background()
release1, err := locker.Lock(ctx, "test")
require.NoError(t, err)
release1()
release2, err := locker.Lock(ctx, "test")
defer release2()
require.NoError(t, err)
// Call release1 again,
// it should not panic or block,
// and it shouldn't affect the other lock
release1()
ok, release3, err := locker.TryLock(ctx, "test")
defer release3()
require.NoError(t, err)
// It should not be able to acquire the lock;
// otherwise, it would mean the lock had been released by release1
assert.False(t, ok)
})
}
// testMemoryLocker does specific tests for memoryLocker
func testMemoryLocker(t *testing.T, locker *memoryLocker) {
// nothing to do
}
// testRedisLocker does specific tests for redisLocker
func testRedisLocker(t *testing.T, locker *redisLocker) {
defer func() {
// This case should be tested at the end.
// Otherwise, it will affect other tests.
t.Run("close", func(t *testing.T) {
assert.NoError(t, locker.Close())
_, err := locker.Lock(context.Background(), "test")
assert.Error(t, err)
})
}()
t.Run("failed extend", func(t *testing.T) {
release, err := locker.Lock(context.Background(), "test")
defer release()
require.NoError(t, err)
// It simulates problems with extending the lock, such as network issues or the redis server being down.
v, ok := locker.mutexM.Load("test")
require.True(t, ok)
m := v.(*redsync.Mutex)
_, _ = m.Unlock() // release it to make it impossible to extend
// In the current design, callers can't know that the lock couldn't be extended.
// Just keep this case to improve the test coverage.
})
}

View File

@ -0,0 +1,67 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package globallock
import (
"context"
"sync"
"time"
)
type memoryLocker struct {
locks sync.Map
}
var _ Locker = &memoryLocker{}
func NewMemoryLocker() Locker {
return &memoryLocker{}
}
func (l *memoryLocker) Lock(ctx context.Context, key string) (ReleaseFunc, error) {
if l.tryLock(key) {
releaseOnce := sync.Once{}
return func() {
releaseOnce.Do(func() {
l.locks.Delete(key)
})
}, nil
}
ticker := time.NewTicker(time.Millisecond * 100)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return func() {}, ctx.Err()
case <-ticker.C:
if l.tryLock(key) {
releaseOnce := sync.Once{}
return func() {
releaseOnce.Do(func() {
l.locks.Delete(key)
})
}, nil
}
}
}
}
func (l *memoryLocker) TryLock(_ context.Context, key string) (bool, ReleaseFunc, error) {
if l.tryLock(key) {
releaseOnce := sync.Once{}
return true, func() {
releaseOnce.Do(func() {
l.locks.Delete(key)
})
}, nil
}
return false, func() {}, nil
}
func (l *memoryLocker) tryLock(key string) bool {
_, loaded := l.locks.LoadOrStore(key, struct{}{})
return !loaded
}
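
A small standalone sketch of the LoadOrStore-based test-and-set that tryLock relies on; the key name is arbitrary:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var locks sync.Map
	// Only the first store of a key "acquires" it; later callers see loaded == true.
	_, loaded := locks.LoadOrStore("key", struct{}{})
	fmt.Println("first acquire succeeded:", !loaded) // true
	_, loaded = locks.LoadOrStore("key", struct{}{})
	fmt.Println("second acquire succeeded:", !loaded) // false
	locks.Delete("key") // release
	_, loaded = locks.LoadOrStore("key", struct{}{})
	fmt.Println("acquire after release succeeded:", !loaded) // true
}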

View File

@ -0,0 +1,137 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package globallock
import (
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"code.gitea.io/gitea/modules/nosql"
"github.com/go-redsync/redsync/v4"
"github.com/go-redsync/redsync/v4/redis/goredis/v9"
)
const redisLockKeyPrefix = "gitea:globallock:"
// redisLockExpiry is the default expiry time for a lock.
// Define it as a variable to make it possible to change it in tests.
var redisLockExpiry = 30 * time.Second
type redisLocker struct {
rs *redsync.Redsync
mutexM sync.Map
closed atomic.Bool
extendWg sync.WaitGroup
}
var _ Locker = &redisLocker{}
func NewRedisLocker(connection string) Locker {
l := &redisLocker{
rs: redsync.New(
goredis.NewPool(
nosql.GetManager().GetRedisClient(connection),
),
),
}
l.extendWg.Add(1)
l.startExtend()
return l
}
func (l *redisLocker) Lock(ctx context.Context, key string) (ReleaseFunc, error) {
return l.lock(ctx, key, 0)
}
func (l *redisLocker) TryLock(ctx context.Context, key string) (bool, ReleaseFunc, error) {
f, err := l.lock(ctx, key, 1)
var (
errTaken *redsync.ErrTaken
errNodeTaken *redsync.ErrNodeTaken
)
if errors.As(err, &errTaken) || errors.As(err, &errNodeTaken) {
return false, f, nil
}
return err == nil, f, err
}
// Close closes the locker.
// It will stop extending the locks and refuse to acquire new locks.
// In actual use, it is not necessary to call this function.
// But it's useful in tests to release resources.
// It could take some time since it waits for the extending goroutine to finish.
func (l *redisLocker) Close() error {
l.closed.Store(true)
l.extendWg.Wait()
return nil
}
func (l *redisLocker) lock(ctx context.Context, key string, tries int) (ReleaseFunc, error) {
if l.closed.Load() {
return func() {}, fmt.Errorf("locker is closed")
}
options := []redsync.Option{
redsync.WithExpiry(redisLockExpiry),
}
if tries > 0 {
options = append(options, redsync.WithTries(tries))
}
mutex := l.rs.NewMutex(redisLockKeyPrefix+key, options...)
if err := mutex.LockContext(ctx); err != nil {
return func() {}, err
}
l.mutexM.Store(key, mutex)
releaseOnce := sync.Once{}
return func() {
releaseOnce.Do(func() {
l.mutexM.Delete(key)
// It's safe to ignore the error here,
// if it failed to unlock, it will be released automatically after the lock expires.
// Do not call mutex.UnlockContext(ctx) here, or it will fail to release when ctx has timed out.
_, _ = mutex.Unlock()
})
}, nil
}
func (l *redisLocker) startExtend() {
if l.closed.Load() {
l.extendWg.Done()
return
}
toExtend := make([]*redsync.Mutex, 0)
l.mutexM.Range(func(_, value any) bool {
m := value.(*redsync.Mutex)
// Extend the lock if it has not expired.
// Although the mutex will be removed from the map before it is released,
// it can still expire because of a failed extension.
// If that happens, it does not need to be extended anymore.
if time.Now().After(m.Until()) {
return true
}
toExtend = append(toExtend, m)
return true
})
for _, v := range toExtend {
// If extending fails, the lock will be released automatically after it expires.
_, _ = v.Extend()
}
time.AfterFunc(redisLockExpiry/2, l.startExtend)
}
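
A simplified sketch of the self-rescheduling extension loop used by startExtend, assuming a hypothetical extendAll callback in place of the real mutex bookkeeping (WaitGroup handling omitted):

package example

import (
	"sync/atomic"
	"time"
)

// keepExtending runs one extension pass and then schedules the next one with
// time.AfterFunc, stopping once closed is set.
func keepExtending(closed *atomic.Bool, interval time.Duration, extendAll func()) {
	if closed.Load() {
		return
	}
	extendAll()
	time.AfterFunc(interval, func() { keepExtending(closed, interval, extendAll) })
}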

View File

@ -16,10 +16,10 @@ import (
"code.gitea.io/gitea/modules/analyze"
"code.gitea.io/gitea/modules/charset"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/indexer/code/internal"
indexer_internal "code.gitea.io/gitea/modules/indexer/internal"
inner_bleve "code.gitea.io/gitea/modules/indexer/internal/bleve"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/typesniffer"
@ -189,21 +189,23 @@ func (b *Indexer) addDelete(filename string, repo *repo_model.Repository, batch
func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha string, changes *internal.RepoChanges) error {
batch := inner_bleve.NewFlushingBatch(b.inner.Indexer, maxBatchSize)
if len(changes.Updates) > 0 {
// Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
if err := git.EnsureValidGitRepository(ctx, repo.RepoPath()); err != nil {
log.Error("Unable to open git repo: %s for %-v: %v", repo.RepoPath(), repo, err)
r, err := gitrepo.OpenRepository(ctx, repo)
if err != nil {
return err
}
batchWriter, batchReader, cancel := git.CatFileBatch(ctx, repo.RepoPath())
defer cancel()
defer r.Close()
gitBatch, err := r.NewBatch(ctx)
if err != nil {
return err
}
defer gitBatch.Close()
for _, update := range changes.Updates {
if err := b.addUpdate(ctx, batchWriter, batchReader, sha, update, repo, batch); err != nil {
if err := b.addUpdate(ctx, gitBatch.Writer, gitBatch.Reader, sha, update, repo, batch); err != nil {
return err
}
}
cancel()
gitBatch.Close()
}
for _, filename := range changes.RemovedFilenames {
if err := b.addDelete(filename, repo, batch); err != nil {

View File

@ -15,11 +15,11 @@ import (
"code.gitea.io/gitea/modules/analyze"
"code.gitea.io/gitea/modules/charset"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/indexer/code/internal"
indexer_internal "code.gitea.io/gitea/modules/indexer/internal"
inner_elasticsearch "code.gitea.io/gitea/modules/indexer/internal/elasticsearch"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/typesniffer"
@ -154,17 +154,19 @@ func (b *Indexer) addDelete(filename string, repo *repo_model.Repository) elasti
func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha string, changes *internal.RepoChanges) error {
reqs := make([]elastic.BulkableRequest, 0)
if len(changes.Updates) > 0 {
// Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
if err := git.EnsureValidGitRepository(ctx, repo.RepoPath()); err != nil {
log.Error("Unable to open git repo: %s for %-v: %v", repo.RepoPath(), repo, err)
r, err := gitrepo.OpenRepository(ctx, repo)
if err != nil {
return err
}
batchWriter, batchReader, cancel := git.CatFileBatch(ctx, repo.RepoPath())
defer cancel()
defer r.Close()
batch, err := r.NewBatch(ctx)
if err != nil {
return err
}
defer batch.Close()
for _, update := range changes.Updates {
updateReqs, err := b.addUpdate(ctx, batchWriter, batchReader, sha, update, repo)
updateReqs, err := b.addUpdate(ctx, batch.Writer, batch.Reader, sha, update, repo)
if err != nil {
return err
}
@ -172,7 +174,7 @@ func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha st
reqs = append(reqs, updateReqs...)
}
}
cancel()
batch.Close()
}
for _, filename := range changes.RemovedFilenames {

View File

@ -113,7 +113,24 @@ func nonGenesisChanges(ctx context.Context, repo *repo_model.Repository, revisio
var changes internal.RepoChanges
var err error
updatedFilenames := make([]string, 0, 10)
for _, line := range strings.Split(stdout, "\n") {
updateChanges := func() error {
cmd := git.NewCommand(ctx, "ls-tree", "--full-tree", "-l").AddDynamicArguments(revision).
AddDashesAndList(updatedFilenames...)
lsTreeStdout, _, err := cmd.RunStdBytes(&git.RunOpts{Dir: repo.RepoPath()})
if err != nil {
return err
}
updates, err1 := parseGitLsTreeOutput(lsTreeStdout)
if err1 != nil {
return err1
}
changes.Updates = append(changes.Updates, updates...)
return nil
}
lines := strings.Split(stdout, "\n")
for _, line := range lines {
line = strings.TrimSpace(line)
if len(line) == 0 {
continue
@ -161,15 +178,22 @@ func nonGenesisChanges(ctx context.Context, repo *repo_model.Repository, revisio
default:
log.Warn("Unrecognized status: %c (line=%s)", status, line)
}
}
cmd := git.NewCommand(ctx, "ls-tree", "--full-tree", "-l").AddDynamicArguments(revision).
AddDashesAndList(updatedFilenames...)
lsTreeStdout, _, err := cmd.RunStdBytes(&git.RunOpts{Dir: repo.RepoPath()})
if err != nil {
// According to https://learn.microsoft.com/en-us/troubleshoot/windows-client/shell-experience/command-line-string-limitation#more-information
// the command line length should be less than 8191 characters; assuming a file path is 256 characters, 8191/256 ≈ 31, so we use 30
if len(updatedFilenames) >= 30 {
if err := updateChanges(); err != nil {
return nil, err
}
updatedFilenames = updatedFilenames[0:0]
}
}
if len(updatedFilenames) > 0 {
if err := updateChanges(); err != nil {
return nil, err
}
}
changes.Updates, err = parseGitLsTreeOutput(lsTreeStdout)
return &changes, err
}
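
The batching above keeps each git ls-tree invocation under the command-line length limit; below is a generic sketch of that chunking pattern, with a hypothetical flush callback standing in for the ls-tree call:

package example

// processInChunks flushes whenever the pending list reaches chunkSize,
// then flushes the remainder once at the end.
func processInChunks(items []string, chunkSize int, flush func(batch []string) error) error {
	pending := make([]string, 0, chunkSize)
	for _, item := range items {
		pending = append(pending, item)
		if len(pending) >= chunkSize {
			if err := flush(pending); err != nil {
				return err
			}
			pending = pending[:0]
		}
	}
	if len(pending) > 0 {
		return flush(pending)
	}
	return nil
}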

View File

@ -466,6 +466,7 @@ name: Name
title: Title
about: About
labels: ["label1", "label2"]
assignees: ["user1", "user2"]
ref: Ref
body:
- type: markdown
@ -527,6 +528,7 @@ body:
Title: "Title",
About: "About",
Labels: []string{"label1", "label2"},
Assignees: []string{"user1", "user2"},
Ref: "Ref",
Fields: []*api.IssueFormField{
{

View File

@ -136,14 +136,13 @@ func (c *HTTPClient) performOperation(ctx context.Context, objects []Pointer, dc
for _, object := range result.Objects {
if object.Error != nil {
objectError := errors.New(object.Error.Message)
log.Trace("Error on object %v: %v", object.Pointer, objectError)
log.Trace("Error on object %v: %v", object.Pointer, object.Error)
if uc != nil {
if _, err := uc(object.Pointer, objectError); err != nil {
if _, err := uc(object.Pointer, object.Error); err != nil {
return err
}
} else {
if err := dc(object.Pointer, nil, objectError); err != nil {
if err := dc(object.Pointer, nil, object.Error); err != nil {
return err
}
}

View File

@ -4,7 +4,11 @@
package lfs
import (
"errors"
"fmt"
"time"
"code.gitea.io/gitea/modules/util"
)
const (
@ -64,6 +68,39 @@ type ObjectError struct {
Message string `json:"message"`
}
var (
// See https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md#successful-responses
// LFS object error codes should match HTTP status codes where possible:
// 404 - The object does not exist on the server.
// 409 - The specified hash algorithm disagrees with the server's acceptable options.
// 410 - The object was removed by the owner.
// 422 - Validation error.
ErrObjectNotExist = util.ErrNotExist // the object does not exist on the server
ErrObjectHashMismatch = errors.New("the specified hash algorithm disagrees with the server's acceptable options")
ErrObjectRemoved = errors.New("the object was removed by the owner")
ErrObjectValidation = errors.New("validation error")
)
func (e *ObjectError) Error() string {
return fmt.Sprintf("[%d] %s", e.Code, e.Message)
}
func (e *ObjectError) Unwrap() error {
switch e.Code {
case 404:
return ErrObjectNotExist
case 409:
return ErrObjectHashMismatch
case 410:
return ErrObjectRemoved
case 422:
return ErrObjectValidation
default:
return errors.New(e.Message)
}
}
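
Because Unwrap maps the LFS error codes onto the sentinel errors above, callers can match them with errors.Is; a hedged sketch with a hypothetical handler:

package example

import (
	"errors"
	"fmt"

	"code.gitea.io/gitea/modules/lfs"
)

// handleObjectError is a hypothetical caller: errors.Is sees through
// ObjectError.Unwrap to the sentinel errors defined above.
func handleObjectError(err error) {
	switch {
	case errors.Is(err, lfs.ErrObjectNotExist):
		fmt.Println("object does not exist on the server; safe to skip")
	case errors.Is(err, lfs.ErrObjectRemoved):
		fmt.Println("object was removed by the owner")
	default:
		fmt.Println("unexpected LFS error:", err)
	}
}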
// PointerBlob associates a Git blob with a Pointer.
type PointerBlob struct {
Hash string

View File

@ -1144,7 +1144,8 @@ func hashCurrentPatternProcessor(ctx *RenderContext, node *html.Node) {
})
}
exist = ctx.GitRepo.IsObjectExist(hash)
// Don't use IsObjectExist since it doesn't support short hashes with the gogit edition.
exist = ctx.GitRepo.IsReferenceExist(hash)
ctx.ShaExistCache[hash] = exist
}

View File

@ -4,8 +4,6 @@
package markup
import (
"path"
"code.gitea.io/gitea/modules/util"
)
@ -14,13 +12,9 @@ func ResolveLink(ctx *RenderContext, link, userContentAnchorPrefix string) (resu
if !isAnchorFragment && !IsFullURLString(link) {
linkBase := ctx.Links.Base
if ctx.IsWiki {
if ext := path.Ext(link); ext == "" || ext == ".-" {
linkBase = ctx.Links.WikiLink() // the link is for a wiki page
} else if DetectMarkupTypeByFileName(link) != "" {
linkBase = ctx.Links.WikiLink() // the link is renderable as a wiki page
} else {
linkBase = ctx.Links.WikiRawLink() // otherwise, use a raw link instead to view&download medias
}
// no need to check whether the link should be resolved as a wiki link or a wiki raw link;
// just use the wiki link here, and it will be redirected to a wiki raw link if necessary
linkBase = ctx.Links.WikiLink()
} else if ctx.Links.BranchPath != "" || ctx.Links.TreePath != "" {
// if there is no BranchPath, then the link will be something like "/owner/repo/src/{the-file-path}"
// and then this link will be handled by the "legacy-ref" code and be redirected to the default branch like "/owner/repo/src/branch/main/{the-file-path}"

View File

@ -437,7 +437,7 @@ func TestRender_ShortLinks(t *testing.T) {
renderableFileURL := util.URLJoin(tree, "markdown_file.md")
renderableFileURLWiki := util.URLJoin(markup.TestRepoURL, "wiki", "markdown_file.md")
unrenderableFileURL := util.URLJoin(tree, "file.zip")
unrenderableFileURLWiki := util.URLJoin(markup.TestRepoURL, "wiki", "raw", "file.zip")
unrenderableFileURLWiki := util.URLJoin(markup.TestRepoURL, "wiki", "file.zip")
favicon := "http://google.com/favicon.ico"
test(

View File

@ -672,9 +672,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="/wiki/raw/path/file" alt="local image"/></a><br/>
@ -730,9 +730,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="https://gitea.io/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="https://gitea.io/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="https://gitea.io/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="https://gitea.io/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="https://gitea.io/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="https://gitea.io/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="https://gitea.io/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="https://gitea.io/wiki/raw/path/file" alt="local image"/></a><br/>
@ -788,9 +788,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="/relative/path/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/path/file" alt="local image"/></a><br/>
@ -848,9 +848,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="/relative/path/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/path/file" alt="local image"/></a><br/>
@ -908,9 +908,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="/relative/path/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/path/file" alt="local image"/></a><br/>
@ -970,9 +970,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="/relative/path/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/path/file" alt="local image"/></a><br/>

View File

@ -45,7 +45,7 @@ func (p *PullRequest) GetContext() DownloaderContext { return p.Context }
// IsForkPullRequest returns true if the pull request comes from a forked repository rather than the same repository
func (p *PullRequest) IsForkPullRequest() bool {
return p.Head.RepoPath() != p.Base.RepoPath()
return p.Head.RepoFullName() != p.Base.RepoFullName()
}
// GetGitRefName returns pull request relative path to head
@ -62,8 +62,8 @@ type PullRequestBranch struct {
OwnerName string `yaml:"owner_name"`
}
// RepoPath returns pull request repo path
func (p PullRequestBranch) RepoPath() string {
// RepoFullName returns pull request repo full name
func (p PullRequestBranch) RepoFullName() string {
return fmt.Sprintf("%s/%s", p.OwnerName, p.RepoName)
}

View File

@ -13,8 +13,7 @@ import (
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/validation"
"github.com/klauspost/compress/zstd"
"code.gitea.io/gitea/modules/zstd"
)
var (

View File

@ -10,8 +10,9 @@ import (
"io"
"testing"
"code.gitea.io/gitea/modules/zstd"
"github.com/dsnet/compress/bzip2"
"github.com/klauspost/compress/zstd"
"github.com/stretchr/testify/assert"
)

View File

@ -14,9 +14,9 @@ import (
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/validation"
"code.gitea.io/gitea/modules/zstd"
"github.com/blakesmith/ar"
"github.com/klauspost/compress/zstd"
"github.com/ulikunitz/xz"
)

View File

@ -10,8 +10,9 @@ import (
"io"
"testing"
"code.gitea.io/gitea/modules/zstd"
"github.com/blakesmith/ar"
"github.com/klauspost/compress/zstd"
"github.com/stretchr/testify/assert"
"github.com/ulikunitz/xz"
)

View File

@ -5,6 +5,7 @@ package repository
import (
"context"
"errors"
"fmt"
"io"
"strings"
@ -181,6 +182,10 @@ func StoreMissingLfsObjectsInRepository(ctx context.Context, repo *repo_model.Re
downloadObjects := func(pointers []lfs.Pointer) error {
err := lfsClient.Download(ctx, pointers, func(p lfs.Pointer, content io.ReadCloser, objectError error) error {
if objectError != nil {
if errors.Is(objectError, lfs.ErrObjectNotExist) {
log.Warn("Repo[%-v]: Ignore missing LFS object %-v: %v", repo, p, objectError)
return nil
}
return objectError
}

View File

@ -14,10 +14,12 @@ import (
// Actions settings
var (
Actions = struct {
Enabled bool
LogStorage *Storage // how the created logs should be stored
LogRetentionDays int64 `ini:"LOG_RETENTION_DAYS"`
LogCompression logCompression `ini:"LOG_COMPRESSION"`
ArtifactStorage *Storage // how the created artifacts should be stored
ArtifactRetentionDays int64 `ini:"ARTIFACT_RETENTION_DAYS"`
Enabled bool
DefaultActionsURL defaultActionsURL `ini:"DEFAULT_ACTIONS_URL"`
ZombieTaskTimeout time.Duration `ini:"ZOMBIE_TASK_TIMEOUT"`
EndlessTaskTimeout time.Duration `ini:"ENDLESS_TASK_TIMEOUT"`
@ -53,6 +55,20 @@ const (
// please consider using `uses: https://the_url_you_want_to_use/username/action_name@version` instead.
)
type logCompression string
func (c logCompression) IsValid() bool {
return c.IsNone() || c.IsZstd()
}
func (c logCompression) IsNone() bool {
return c == "" || strings.ToLower(string(c)) == "none"
}
func (c logCompression) IsZstd() bool {
return strings.ToLower(string(c)) == "zstd"
}
func loadActionsFrom(rootCfg ConfigProvider) error {
sec := rootCfg.Section("actions")
err := sec.MapTo(&Actions)
@ -78,10 +94,17 @@ func loadActionsFrom(rootCfg ConfigProvider) error {
if err != nil {
return err
}
// default to 1 year
if Actions.LogRetentionDays <= 0 {
Actions.LogRetentionDays = 365
}
actionsSec, _ := rootCfg.GetSection("actions.artifacts")
Actions.ArtifactStorage, err = getStorage(rootCfg, "actions_artifacts", "", actionsSec)
if err != nil {
return err
}
// default to 90 days, as in GitHub Actions
if Actions.ArtifactRetentionDays <= 0 {
@ -92,5 +115,9 @@ func loadActionsFrom(rootCfg ConfigProvider) error {
Actions.EndlessTaskTimeout = sec.Key("ENDLESS_TASK_TIMEOUT").MustDuration(3 * time.Hour)
Actions.AbandonedJobTimeout = sec.Key("ABANDONED_JOB_TIMEOUT").MustDuration(24 * time.Hour)
return err
if !Actions.LogCompression.IsValid() {
return fmt.Errorf("invalid [actions] LOG_COMPRESSION: %q", Actions.LogCompression)
}
return nil
}

View File

@ -6,7 +6,7 @@ package setting
import (
"code.gitea.io/gitea/modules/log"
"github.com/go-fed/httpsig"
"github.com/42wim/httpsig"
)
// Federation settings

View File

@ -0,0 +1,37 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package setting
import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/nosql"
)
// GlobalLock represents configuration of global lock
var GlobalLock = struct {
ServiceType string
ServiceConnStr string
}{
ServiceType: "memory",
}
func loadGlobalLockFrom(rootCfg ConfigProvider) {
sec := rootCfg.Section("global_lock")
GlobalLock.ServiceType = sec.Key("SERVICE_TYPE").MustString("memory")
switch GlobalLock.ServiceType {
case "memory":
case "redis":
connStr := sec.Key("SERVICE_CONN_STR").String()
if connStr == "" {
log.Fatal("SERVICE_CONN_STR is empty for redis")
}
u := nosql.ToRedisURI(connStr)
if u == nil {
log.Fatal("SERVICE_CONN_STR %s is not a valid redis connection string", connStr)
}
GlobalLock.ServiceConnStr = connStr
default:
log.Fatal("Unknown sync lock service type: %s", GlobalLock.ServiceType)
}
}

View File

@ -0,0 +1,35 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package setting
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestLoadGlobalLockConfig(t *testing.T) {
t.Run("DefaultGlobalLockConfig", func(t *testing.T) {
iniStr := ``
cfg, err := NewConfigProviderFromData(iniStr)
assert.NoError(t, err)
loadGlobalLockFrom(cfg)
assert.EqualValues(t, "memory", GlobalLock.ServiceType)
})
t.Run("RedisGlobalLockConfig", func(t *testing.T) {
iniStr := `
[global_lock]
SERVICE_TYPE = redis
SERVICE_CONN_STR = addrs=127.0.0.1:6379 db=0
`
cfg, err := NewConfigProviderFromData(iniStr)
assert.NoError(t, err)
loadGlobalLockFrom(cfg)
assert.EqualValues(t, "redis", GlobalLock.ServiceType)
assert.EqualValues(t, "addrs=127.0.0.1:6379 db=0", GlobalLock.ServiceConnStr)
})
}

View File

@ -42,6 +42,8 @@ var (
LimitSizeRubyGems int64
LimitSizeSwift int64
LimitSizeVagrant int64
DefaultRPMSignEnabled bool
}{
Enabled: true,
LimitTotalOwnerCount: -1,
@ -97,6 +99,7 @@ func loadPackagesFrom(rootCfg ConfigProvider) (err error) {
Packages.LimitSizeRubyGems = mustBytes(sec, "LIMIT_SIZE_RUBYGEMS")
Packages.LimitSizeSwift = mustBytes(sec, "LIMIT_SIZE_SWIFT")
Packages.LimitSizeVagrant = mustBytes(sec, "LIMIT_SIZE_VAGRANT")
Packages.DefaultRPMSignEnabled = sec.Key("DEFAULT_RPM_SIGN_ENABLED").MustBool(false)
return nil
}

View File

@ -147,6 +147,7 @@ func loadCommonSettingsFrom(cfg ConfigProvider) error {
loadGitFrom(cfg)
loadMirrorFrom(cfg)
loadMarkupFrom(cfg)
loadGlobalLockFrom(cfg)
loadOtherFrom(cfg)
return nil
}

View File

@ -494,3 +494,17 @@ type PackagePayload struct {
func (p *PackagePayload) JSONPayload() ([]byte, error) {
return json.MarshalIndent(p, "", " ")
}
// WorkflowDispatchPayload represents a workflow dispatch payload
type WorkflowDispatchPayload struct {
Workflow string `json:"workflow"`
Ref string `json:"ref"`
Inputs map[string]any `json:"inputs"`
Repository *Repository `json:"repository"`
Sender *User `json:"sender"`
}
// JSONPayload implements Payload
func (p *WorkflowDispatchPayload) JSONPayload() ([]byte, error) {
return json.MarshalIndent(p, "", " ")
}

View File

@ -180,16 +180,17 @@ type IssueTemplate struct {
Name string `json:"name" yaml:"name"`
Title string `json:"title" yaml:"title"`
About string `json:"about" yaml:"about"` // Using "description" in a template file is compatible
Labels IssueTemplateLabels `json:"labels" yaml:"labels"`
Labels IssueTemplateStringSlice `json:"labels" yaml:"labels"`
Assignees IssueTemplateStringSlice `json:"assignees" yaml:"assignees"`
Ref string `json:"ref" yaml:"ref"`
Content string `json:"content" yaml:"-"`
Fields []*IssueFormField `json:"body" yaml:"body"`
FileName string `json:"file_name" yaml:"-"`
}
type IssueTemplateLabels []string
type IssueTemplateStringSlice []string
func (l *IssueTemplateLabels) UnmarshalYAML(value *yaml.Node) error {
func (l *IssueTemplateStringSlice) UnmarshalYAML(value *yaml.Node) error {
var labels []string
if value.IsZero() {
*l = labels
@ -217,7 +218,7 @@ func (l *IssueTemplateLabels) UnmarshalYAML(value *yaml.Node) error {
*l = labels
return nil
}
return fmt.Errorf("line %d: cannot unmarshal %s into IssueTemplateLabels", value.Line, value.ShortTag())
return fmt.Errorf("line %d: cannot unmarshal %s into IssueTemplateStringSlice", value.Line, value.ShortTag())
}
type IssueConfigContactLink struct {

View File

@ -42,7 +42,7 @@ func TestIssueTemplate_Type(t *testing.T) {
}
}
func TestIssueTemplateLabels_UnmarshalYAML(t *testing.T) {
func TestIssueTemplateStringSlice_UnmarshalYAML(t *testing.T) {
tests := []struct {
name string
content string
@ -88,7 +88,7 @@ labels:
b: bb
`,
tmpl: &IssueTemplate{},
wantErr: "line 3: cannot unmarshal !!map into IssueTemplateLabels",
wantErr: "line 3: cannot unmarshal !!map into IssueTemplateStringSlice",
},
}
for _, tt := range tests {

View File

@ -20,6 +20,7 @@ type PullRequest struct {
Assignee *User `json:"assignee"`
Assignees []*User `json:"assignees"`
RequestedReviewers []*User `json:"requested_reviewers"`
RequestedReviewersTeams []*Team `json:"requested_reviewers_teams"`
State StateType `json:"state"`
Draft bool `json:"draft"`
IsLocked bool `json:"is_locked"`

View File

@ -5,6 +5,7 @@ package structs
// AddCollaboratorOption options when adding a user as a collaborator of a repository
type AddCollaboratorOption struct {
// enum: read,write,admin
Permission *string `json:"permission"`
}

View File

@ -1,69 +0,0 @@
// Copyright 2016 The Gogs Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package sync
import (
"sync"
)
// ExclusivePool is a pool of non-identical instances
// that only one instance with same identity is in the pool at a time.
// In other words, only instances with different identities can be in
// the pool the same time. If another instance with same identity tries
// to get into the pool, it hangs until previous instance left the pool.
//
// This pool is particularly useful for performing tasks on same resource
// on the file system in different goroutines.
type ExclusivePool struct {
lock sync.Mutex
// pool maintains locks for each instance in the pool.
pool map[string]*sync.Mutex
// count maintains the number of times an instance with same identity checks in
// to the pool, and should be reduced to 0 (removed from map) by checking out
// with same number of times.
// The purpose of count is to delete lock when count down to 0 and recycle memory
// from map object.
count map[string]int
}
// NewExclusivePool initializes and returns a new ExclusivePool object.
func NewExclusivePool() *ExclusivePool {
return &ExclusivePool{
pool: make(map[string]*sync.Mutex),
count: make(map[string]int),
}
}
// CheckIn checks in an instance to the pool and hangs while instance
// with same identity is using the lock.
func (p *ExclusivePool) CheckIn(identity string) {
p.lock.Lock()
lock, has := p.pool[identity]
if !has {
lock = &sync.Mutex{}
p.pool[identity] = lock
}
p.count[identity]++
p.lock.Unlock()
lock.Lock()
}
// CheckOut checks out an instance from the pool and releases the lock
// to let other instances with same identity to grab the lock.
func (p *ExclusivePool) CheckOut(identity string) {
p.lock.Lock()
defer p.lock.Unlock()
p.pool[identity].Unlock()
if p.count[identity] == 1 {
delete(p.pool, identity)
delete(p.count, identity)
} else {
p.count[identity]--
}
}

Some files were not shown because too many files have changed in this diff.