Merge branch 'main' into sync-issue-pr-and-more

harryzcy 2023-05-16 10:47:23 +08:00 committed by GitHub
commit 994da8567e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
302 changed files with 7323 additions and 4817 deletions


@ -9,13 +9,15 @@ parserOptions:
ecmaVersion: latest
plugins:
- eslint-plugin-unicorn
- "@eslint-community/eslint-plugin-eslint-comments"
- eslint-plugin-custom-elements
- eslint-plugin-import
- eslint-plugin-jquery
- eslint-plugin-no-jquery
- eslint-plugin-sonarjs
- eslint-plugin-custom-elements
- eslint-plugin-regexp
- eslint-plugin-sonarjs
- eslint-plugin-unicorn
- eslint-plugin-wc
env:
es2022: true
@ -43,6 +45,15 @@ overrides:
import/no-unused-modules: [0]
rules:
"@eslint-community/eslint-comments/disable-enable-pair": [2]
"@eslint-community/eslint-comments/no-aggregating-enable": [2]
"@eslint-community/eslint-comments/no-duplicate-disable": [2]
"@eslint-community/eslint-comments/no-restricted-disable": [0]
"@eslint-community/eslint-comments/no-unlimited-disable": [2]
"@eslint-community/eslint-comments/no-unused-disable": [2]
"@eslint-community/eslint-comments/no-unused-enable": [2]
"@eslint-community/eslint-comments/no-use": [0]
"@eslint-community/eslint-comments/require-description": [0]
accessor-pairs: [2]
array-bracket-newline: [0]
array-bracket-spacing: [2, never]
@ -113,9 +124,9 @@ rules:
import/namespace: [0]
import/newline-after-import: [0]
import/no-absolute-path: [0]
import/no-amd: [0]
import/no-amd: [2]
import/no-anonymous-default-export: [0]
import/no-commonjs: [0]
import/no-commonjs: [2]
import/no-cycle: [2, {ignoreExternal: true, maxDepth: 1}]
import/no-default-export: [0]
import/no-deprecated: [0]
@ -576,7 +587,7 @@ rules:
sonarjs/no-nested-template-literals: [0]
sonarjs/no-one-iteration-loop: [2]
sonarjs/no-redundant-boolean: [2]
sonarjs/no-redundant-jump: [0]
sonarjs/no-redundant-jump: [2]
sonarjs/no-same-line-conditional: [2]
sonarjs/no-small-switch: [0]
sonarjs/no-unused-collection: [2]
@ -616,17 +627,18 @@ rules:
unicorn/import-style: [0]
unicorn/new-for-builtins: [2]
unicorn/no-abusive-eslint-disable: [0]
unicorn/no-array-callback-reference: [0]
unicorn/no-array-for-each: [2]
unicorn/no-array-instanceof: [0]
unicorn/no-array-method-this-argument: [2]
unicorn/no-array-push-push: [2]
unicorn/no-array-reduce: [2]
unicorn/no-await-expression-member: [0]
unicorn/no-console-spaces: [0]
unicorn/no-document-cookie: [2]
unicorn/no-empty-file: [2]
unicorn/no-fn-reference-in-iterator: [0]
unicorn/no-for-loop: [0]
unicorn/no-hex-escape: [0]
unicorn/no-instanceof-array: [0]
unicorn/no-invalid-remove-event-listener: [2]
unicorn/no-keyword-prefix: [0]
unicorn/no-lonely-if: [2]
@ -637,7 +649,6 @@ rules:
unicorn/no-null: [0]
unicorn/no-object-as-default-parameter: [0]
unicorn/no-process-exit: [0]
unicorn/no-reduce: [2]
unicorn/no-static-only-class: [2]
unicorn/no-thenable: [2]
unicorn/no-this-assignment: [2]
@ -663,15 +674,19 @@ rules:
unicorn/prefer-array-index-of: [2]
unicorn/prefer-array-some: [2]
unicorn/prefer-at: [0]
unicorn/prefer-blob-reading-methods: [2]
unicorn/prefer-code-point: [0]
unicorn/prefer-dataset: [2]
unicorn/prefer-date-now: [2]
unicorn/prefer-default-parameters: [0]
unicorn/prefer-event-key: [2]
unicorn/prefer-dom-node-append: [2]
unicorn/prefer-dom-node-dataset: [0]
unicorn/prefer-dom-node-remove: [2]
unicorn/prefer-dom-node-text-content: [2]
unicorn/prefer-event-target: [2]
unicorn/prefer-export-from: [2, {ignoreUsedVariables: true}]
unicorn/prefer-includes: [2]
unicorn/prefer-json-parse-buffer: [0]
unicorn/prefer-keyboard-event-key: [2]
unicorn/prefer-logical-operator-over-ternary: [2]
unicorn/prefer-math-trunc: [2]
unicorn/prefer-modern-dom-apis: [0]
@ -679,9 +694,7 @@ rules:
unicorn/prefer-module: [2]
unicorn/prefer-native-coercion-functions: [2]
unicorn/prefer-negative-index: [2]
unicorn/prefer-node-append: [0]
unicorn/prefer-node-protocol: [2]
unicorn/prefer-node-remove: [0]
unicorn/prefer-number-properties: [0]
unicorn/prefer-object-from-entries: [2]
unicorn/prefer-object-has-own: [0]
@ -690,17 +703,17 @@ rules:
unicorn/prefer-query-selector: [0]
unicorn/prefer-reflect-apply: [0]
unicorn/prefer-regexp-test: [2]
unicorn/prefer-replace-all: [0]
unicorn/prefer-set-has: [0]
unicorn/prefer-set-size: [2]
unicorn/prefer-spread: [0]
unicorn/prefer-starts-ends-with: [2]
unicorn/prefer-string-replace-all: [0]
unicorn/prefer-string-slice: [0]
unicorn/prefer-string-starts-ends-with: [2]
unicorn/prefer-string-trim-start-end: [2]
unicorn/prefer-switch: [0]
unicorn/prefer-ternary: [0]
unicorn/prefer-text-content: [2]
unicorn/prefer-top-level-await: [0]
unicorn/prefer-trim-start-end: [2]
unicorn/prefer-type-error: [0]
unicorn/prevent-abbreviations: [0]
unicorn/relative-url-style: [2]
@ -715,6 +728,15 @@ rules:
use-isnan: [2]
valid-typeof: [2, {requireStringLiterals: true}]
vars-on-top: [0]
wc/attach-shadow-constructor: [2]
wc/guard-super-call: [2]
wc/no-closed-shadow-root: [2]
wc/no-constructor-attributes: [2]
wc/no-constructor-params: [2]
wc/no-invalid-element-name: [0] # covered by custom-elements/valid-tag-name
wc/no-self-class: [2]
wc/no-typos: [2]
wc/require-listener-teardown: [2]
wrap-iife: [2, inside]
wrap-regex: [0]
yield-star-spacing: [2, after]


@ -1,21 +1,20 @@
name: "Cron: Update licenses and gitignores"
name: cron-licenses
on:
schedule:
# weekly on Monday at 0:07 UTC
- cron: "7 0 * * 1"
- cron: "7 0 * * 1" # every Monday at 00:07 UTC
jobs:
cron:
cron-licenses:
if: github.repository == 'go-gitea/gitea'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '>=1.20.1'
- name: update licenses and gitignores
run: timeout -s ABRT 40m make generate-license generate-gitignore
go-version: ">=1.20.1"
- run: make generate-license generate-gitignore
timeout-minutes: 40
- name: push translations to repo
uses: appleboy/git-push-action@v0.0.2
with:


@ -1,8 +1,8 @@
name: 'Lock Threads'
name: cron-lock
on:
schedule:
- cron: '0 0 * * *' # Run once a day
- cron: "0 0 * * *" # every day at 00:00 UTC
workflow_dispatch:
permissions:


@ -1,15 +1,14 @@
name: "Cron: Pull translations from Crowdin"
name: cron-translations
on:
schedule:
- cron: "7 0 * * *" # every day at 0:07 UTC
- cron: "7 0 * * *" # every day at 00:07 UTC
jobs:
crowdin_pull:
crowdin-pull:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- uses: actions/checkout@v3
- name: download from crowdin
uses: docker://jonasfranz/crowdin
env:
@ -30,16 +29,16 @@ jobs:
commit_message: "[skip ci] Updated translations via Crowdin"
remote: "git@github.com:go-gitea/gitea.git"
ssh_key: ${{ secrets.DEPLOY_KEY }}
crowdin_push:
crowdin-push:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- uses: actions/checkout@v3
- name: push translations to crowdin
uses: docker://jonasfranz/crowdin
env:
CROWDIN_KEY: ${{ secrets.CROWDIN_KEY }}
PLUGIN_UPLOAD: true
PLUGIN_EXPORT_DIR: options/locale/
PLUGIN_IGNORE_BRANCH: true
PLUGIN_PROJECT_IDENTIFIER: gitea
PLUGIN_FILES: |


@ -1,4 +1,4 @@
name: "Docs: Publish"
name: publish-docs
on:
push:
@ -11,12 +11,10 @@ jobs:
compliance-docs:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20.1'
go-version: ">=1.20.1"
- name: build docs
run: |
cd docs


@ -0,0 +1,22 @@
name: compliance-docs
on:
pull_request:
paths:
- "docs/**"
- "*.md"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
compliance-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 20
- run: make deps-frontend
- run: make lint-md


@ -1,4 +1,4 @@
name: "Pull: Compliance Tests"
name: compliance
on: [pull_request]
@ -7,136 +7,93 @@ concurrency:
cancel-in-progress: true
jobs:
lint_basic:
lint-backend:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20'
go-version: ">=1.20"
check-latest: true
- name: deps-backend
run: make deps-backend deps-tools
- name: lint backend
run: make lint-backend
- run: make deps-backend deps-tools
- run: make lint-backend
env:
GOPROXY: https://goproxy.io # proxy.golang.org is blocked in China, this proxy is not
GOSUMDB: sum.golang.org
TAGS: bindata sqlite sqlite_unlock_notify
lint_windows:
lint-go-windows:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20'
go-version: ">=1.20"
check-latest: true
- name: deps-backend
run: make deps-backend deps-tools
- name: lint-backend-windows
run: make lint-go-windows lint-go-vet
- run: make deps-backend deps-tools
- run: make lint-go-windows lint-go-vet
env:
GOPROXY: https://goproxy.io # proxy.golang.org is blocked in China, this proxy is not
GOSUMDB: sum.golang.org
TAGS: bindata sqlite sqlite_unlock_notify
GOOS: windows
GOARCH: amd64
lint_gogit:
lint-go-gogit:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20'
go-version: ">=1.20"
check-latest: true
- name: deps-backend
run: make deps-backend deps-tools
- name: lint-backend-gogit
run: make lint-backend
- run: make deps-backend deps-tools
- run: make lint-go
env:
GOPROXY: https://goproxy.io # proxy.golang.org is blocked in China, this proxy is not
GOSUMDB: sum.golang.org
TAGS: bindata gogit sqlite sqlite_unlock_notify
- name: checks backend
run: make --always-make checks-backend # ensure the 'go-licenses' make target runs
check_backend:
checks-backend:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20'
go-version: ">=1.20"
check-latest: true
- name: deps-backend
run: make deps-backend deps-tools
- name: checks backend
run: make --always-make checks-backend # ensure the 'go-licenses' make target runs
- run: make deps-backend deps-tools
- run: make --always-make checks-backend # ensure the "go-licenses" make target runs
frontend:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup node
uses: actions/setup-node@v3
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 20
- name: deps-frontend
run: make deps-frontend
- name: lint frontend
run: make lint-frontend
- name: checks frontend
run: make checks-frontend
- name: test frontend
run: make test-frontend
- run: make deps-frontend
- run: make lint-frontend
- run: make checks-frontend
backend:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20'
go-version: ">=1.20"
check-latest: true
- name: setup node
uses: actions/setup-node@v3
- uses: actions/setup-node@v3
with:
node-version: 20
- name: deps-backend
run: make deps-backend deps-tools
- name: deps-frontend
run: make deps-frontend
- name: build frontend
run: make frontend
- name: build-backend-no-gcc
run: go build -o gitea_no_gcc # test if build succeeds without the sqlite tag
env:
GOPROXY: https://goproxy.io
- run: make deps-backend deps-tools
- run: make deps-frontend
- run: make frontend
- run: go build -o gitea_no_gcc # test if build succeeds without the sqlite tag
- name: build-backend-arm64
run: make backend # test cross compile
env:
GOPROXY: https://goproxy.io
GOOS: linux
GOARCH: arm64
TAGS: bindata gogit
- name: build-backend-windows
run: go build -o gitea_windows
env:
GOPROXY: https://goproxy.io
GOOS: windows
GOARCH: amd64
TAGS: bindata gogit
- name: build-backend-386
run: go build -o gitea_linux_386 # test if compatible with 32 bit
env:
GOPROXY: https://goproxy.io
GOOS: linux
GOARCH: 386


@ -1,26 +0,0 @@
name: "Pull: Compliance testing for documentation"
on:
pull_request:
paths:
- "docs/**"
- "*.md"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
compliance-docs:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup node
uses: actions/setup-node@v3
with:
node-version: 20
- name: install dependencies
run: make deps-frontend
- name: lint markdown
run: make lint-md


@ -1,4 +1,4 @@
name: "Pull: Database Tests"
name: db-tests
on: [pull_request]
@ -7,8 +7,7 @@ concurrency:
cancel-in-progress: true
jobs:
# PostgreSQL Tests
db_pgsql_test:
test-pgsql:
runs-on: ubuntu-latest
services:
pgsql:
@ -33,61 +32,45 @@ jobs:
ports:
- "9000:9000"
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20.0'
go-version: ">=1.20.0"
- name: Add hosts to /etc/hosts
run: echo "127.0.0.1 pgsql ldap minio" | sudo tee -a /etc/hosts
- name: install dependencies
run: make deps-backend
- name: build
run: make backend
- run: make deps-backend
- run: make backend
env:
GOPROXY: https://goproxy.io
GOSUMDB: sum.golang.org
TAGS: bindata
- name: run tests
run: timeout -s ABRT 50m make test-pgsql-migration test-pgsql
- run: make test-pgsql-migration test-pgsql
timeout-minutes: 50
env:
GOPROXY: https://goproxy.io
TAGS: bindata gogit
RACE_ENABLED: true
TEST_TAGS: gogit
TEST_LDAP: 1
USE_REPO_TEST_DIR: 1
# SQLite Tests
db_sqlite_test:
test-sqlite:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20.0'
- name: install dependencies
run: make deps-backend
- name: build
run: make backend
go-version: ">=1.20.0"
- run: make deps-backend
- run: make backend
env:
GOPROXY: https://goproxy.io
GOSUMDB: sum.golang.org
TAGS: bindata gogit sqlite sqlite_unlock_notify
- name: run tests
run: timeout -s ABRT 50m make test-sqlite-migration test-sqlite
- run: make test-sqlite-migration test-sqlite
timeout-minutes: 50
env:
GOPROXY: https://goproxy.io
TAGS: bindata gogit sqlite sqlite_unlock_notify
RACE_ENABLED: true
TEST_TAGS: gogit sqlite sqlite_unlock_notify
USE_REPO_TEST_DIR: 1
# Unit Tests
db_unit_tests:
test-unit:
runs-on: ubuntu-latest
services:
mysql:
@ -110,40 +93,47 @@ jobs:
- "143:143"
- "587:587"
- "993:993"
redis:
image: redis
options: >- # wait until redis has started
--health-cmd "redis-cli ping"
--health-interval 5s
--health-timeout 3s
--health-retries 10
ports:
- 6379:6379
minio:
image: bitnami/minio:2021.3.17
env:
MINIO_ACCESS_KEY: 123456
MINIO_SECRET_KEY: 12345678
ports:
- "9000:9000"
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20.0'
go-version: ">=1.20.0"
- name: Add hosts to /etc/hosts
run: echo "127.0.0.1 mysql elasticsearch smtpimap" | sudo tee -a /etc/hosts
- name: install dependencies
run: make deps-backend
- name: build
run: make backend
- run: make deps-backend
- run: make backend
env:
GOPROXY: https://goproxy.io
GOSUMDB: sum.golang.org
TAGS: bindata
- name: unit tests
- name: unit-tests
run: make unit-test-coverage test-check
env:
GOPROXY: https://goproxy.io
TAGS: bindata
RACE_ENABLED: true
GITHUB_READ_TOKEN: ${{ secrets.GITHUB_READ_TOKEN }}
- name: unit tests (gogit)
- name: unit-tests-gogit
run: make unit-test-coverage test-check
env:
GOPROXY: https://goproxy.io
TAGS: bindata gogit
RACE_ENABLED: true
GITHUB_READ_TOKEN: ${{ secrets.GITHUB_READ_TOKEN }}
# MySQL Tests
db_mysql_test:
test-mysql5:
runs-on: ubuntu-latest
services:
mysql:
@ -167,33 +157,25 @@ jobs:
- "587:587"
- "993:993"
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20.0'
go-version: ">=1.20.0"
- name: Add hosts to /etc/hosts
run: echo "127.0.0.1 mysql elasticsearch smtpimap" | sudo tee -a /etc/hosts
- name: install dependencies
run: make deps-backend
- name: build
run: make backend
- run: make deps-backend
- run: make backend
env:
GOPROXY: https://goproxy.io
GOSUMDB: sum.golang.org
TAGS: bindata
- name: run tests
run: make test-mysql-migration integration-test-coverage
env:
GOPROXY: https://goproxy.io
TAGS: bindata
RACE_ENABLED: true
USE_REPO_TEST_DIR: 1
TEST_INDEXER_CODE_ES_URL: "http://elastic:changeme@elasticsearch:9200"
# MySQL8 Tests
db_mysql8_test:
test-mysql8:
runs-on: ubuntu-latest
services:
mysql8:
@ -204,31 +186,23 @@ jobs:
ports:
- "3306:3306"
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20.0'
go-version: ">=1.20.0"
- name: Add hosts to /etc/hosts
run: echo "127.0.0.1 mysql8" | sudo tee -a /etc/hosts
- name: install dependencies
run: make deps-backend
- name: build
run: make backend
- run: make deps-backend
- run: make backend
env:
GOPROXY: https://goproxy.io
GOSUMDB: sum.golang.org
TAGS: bindata
- name: run tests
run: timeout -s ABRT 50m make test-mysql8-migration test-mysql8
- run: make test-mysql8-migration test-mysql8
timeout-minutes: 50
env:
GOPROXY: https://goproxy.io
TAGS: bindata
USE_REPO_TEST_DIR: 1
# MSSQL Tests
db_mssql_test:
test-mssql:
runs-on: ubuntu-latest
services:
mssql:
@ -240,25 +214,18 @@ jobs:
ports:
- "1433:1433"
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '>=1.20.0'
go-version: ">=1.20.0"
- name: Add hosts to /etc/hosts
run: echo "127.0.0.1 mssql" | sudo tee -a /etc/hosts
- name: install dependencies
run: make deps-backend
- name: build
run: make backend
- run: make deps-backend
- run: make backend
env:
GOPROXY: https://goproxy.io
GOSUMDB: sum.golang.org
TAGS: bindata
- name: run tests
run: timeout -s ABRT 50m make test-mssql-migration test-mssql
- run: make test-mssql-migration test-mssql
timeout-minutes: 50
env:
GOPROXY: https://goproxy.io
TAGS: bindata
USE_REPO_TEST_DIR: 1


@ -0,0 +1,17 @@
name: docker-dryrun
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
docker-dryrun:
runs-on: ubuntu-latest
steps:
- uses: docker/setup-buildx-action@v2
- uses: docker/build-push-action@v4
with:
push: false
tags: gitea/gitea:linux-amd64


@ -1,23 +0,0 @@
name: "Pull: Docker Dry Run"
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
docker_dryrun:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build and push
uses: docker/build-push-action@v4
with:
push: false
tags: gitea/gitea:linux-amd64
build-args: |
GOPROXY=https://goproxy.io

.github/workflows/pull-e2e-tests.yml

@ -0,0 +1,26 @@
name: e2e-tests
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
test-e2e:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: ">=1.20"
check-latest: true
- uses: actions/setup-node@v3
with:
node-version: 20
- run: make deps-frontend frontend deps-backend
- run: npx playwright install --with-deps
- run: make test-e2e-sqlite
timeout-minutes: 40
env:
USE_REPO_TEST_DIR: 1


@ -1,33 +0,0 @@
name: "Pull: E2E Tests"
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
e2e_tests:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup go
uses: actions/setup-go@v4
with:
go-version: '>=1.20'
check-latest: true
- name: setup node
uses: actions/setup-node@v3
with:
node-version: 20
- name: build
run: make deps-frontend frontend deps-backend
- name: Install playwright browsers
run: npx playwright install --with-deps
- name: run tests
run: timeout -s ABRT 40m make test-e2e-sqlite
env:
GOPROXY: https://goproxy.io
GOSUMDB: sum.golang.org
USE_REPO_TEST_DIR: 1


@ -31,7 +31,7 @@ vscode:
- golang.go
- stylelint.vscode-stylelint
- DavidAnson.vscode-markdownlint
- johnsoncodehk.volar
- Vue.volar
- ms-azuretools.vscode-docker
- zixuanchen.vitest-explorer
- alexcvzz.vscode-sqlite


@ -1,5 +1,5 @@
#Build stage
FROM docker.io/library/golang:1.20-alpine3.17 AS build-env
FROM docker.io/library/golang:1.20-alpine3.18 AS build-env
ARG GOPROXY
ENV GOPROXY ${GOPROXY:-direct}
@ -23,7 +23,7 @@ RUN if [ -n "${GITEA_VERSION}" ]; then git checkout "${GITEA_VERSION}"; fi \
# Begin env-to-ini build
RUN go build contrib/environment-to-ini/environment-to-ini.go
FROM docker.io/library/alpine:3.17
FROM docker.io/library/alpine:3.18
LABEL maintainer="maintainers@gitea.io"
EXPOSE 22 3000
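For local testing, the `GOPROXY` build-arg declared at the top of this Dockerfile can be overridden on the command line; a minimal sketch (the tag name is illustrative):
```shell
# build the image, overriding the default GOPROXY=direct
docker build --build-arg GOPROXY=https://goproxy.io -t gitea/gitea:dev .
```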


@ -1,5 +1,5 @@
#Build stage
FROM docker.io/library/golang:1.20-alpine3.17 AS build-env
FROM docker.io/library/golang:1.20-alpine3.18 AS build-env
ARG GOPROXY
ENV GOPROXY ${GOPROXY:-direct}
@ -23,7 +23,7 @@ RUN if [ -n "${GITEA_VERSION}" ]; then git checkout "${GITEA_VERSION}"; fi \
# Begin env-to-ini build
RUN go build contrib/environment-to-ini/environment-to-ini.go
FROM docker.io/library/alpine:3.17
FROM docker.io/library/alpine:3.18
LABEL maintainer="maintainers@gitea.io"
EXPOSE 2222 3000


@ -394,7 +394,7 @@ lint-go:
lint-go-fix:
$(GO) run $(GOLANGCI_LINT_PACKAGE) run --fix
# workaround step for the lint-backend-windows CI task because 'go run' can not
# workaround step for the lint-go-windows CI task because 'go run' can not
# have distinct GOOS/GOARCH for its build and run steps
.PHONY: lint-go-windows
lint-go-windows:
@ -409,7 +409,7 @@ lint-go-vet:
.PHONY: lint-editorconfig
lint-editorconfig:
$(GO) run $(EDITORCONFIG_CHECKER_PACKAGE) templates
$(GO) run $(EDITORCONFIG_CHECKER_PACKAGE) templates .github/workflows
.PHONY: watch
watch:


@ -8,6 +8,7 @@ import (
"fmt"
"strings"
actions_model "code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/models/db"
git_model "code.gitea.io/gitea/models/git"
"code.gitea.io/gitea/models/migrations"
@ -32,7 +33,7 @@ var CmdMigrateStorage = cli.Command{
cli.StringFlag{
Name: "type, t",
Value: "",
Usage: "Type of stored files to copy. Allowed types: 'attachments', 'lfs', 'avatars', 'repo-avatars', 'repo-archivers', 'packages'",
Usage: "Type of stored files to copy. Allowed types: 'attachments', 'lfs', 'avatars', 'repo-avatars', 'repo-archivers', 'packages', 'actions-log'",
},
cli.StringFlag{
Name: "storage, s",
@ -134,6 +135,22 @@ func migratePackages(ctx context.Context, dstStorage storage.ObjectStorage) erro
})
}
func migrateActionsLog(ctx context.Context, dstStorage storage.ObjectStorage) error {
return db.Iterate(ctx, nil, func(ctx context.Context, task *actions_model.ActionTask) error {
if task.LogExpired {
// the log has been cleared
return nil
}
if !task.LogInStorage {
// running tasks store logs in DBFS
return nil
}
p := task.LogFilename
_, err := storage.Copy(dstStorage, p, storage.Actions, p)
return err
})
}
func runMigrateStorage(ctx *cli.Context) error {
stdCtx, cancel := installSignals()
defer cancel()
@ -201,6 +218,7 @@ func runMigrateStorage(ctx *cli.Context) error {
"repo-avatars": migrateRepoAvatars,
"repo-archivers": migrateRepoArchivers,
"packages": migratePackages,
"actions-log": migrateActionsLog,
}
tp := strings.ToLower(ctx.String("type"))
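With the new `actions-log` entry registered above, the migration runs through the existing `migrate-storage` command; a hypothetical invocation using the `--type` and `--storage` flags defined in this file (flag values depend on your storage configuration):
```shell
# copy stored actions logs from the current storage to a minio backend
gitea migrate-storage --type actions-log --storage minio
```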


@ -52,7 +52,7 @@ After=network.target
# Uncomment the next line if you have repos with lots of files and get a HTTP 500 error because of that
# LimitNOFILE=524288:524288
RestartSec=2s
Type=simple
Type=notify
User=git
Group=git
WorkingDirectory=/var/lib/gitea/
@ -62,6 +62,7 @@ WorkingDirectory=/var/lib/gitea/
ExecStart=/usr/local/bin/gitea web --config /etc/gitea/app.ini
Restart=always
Environment=USER=git HOME=/home/git GITEA_WORK_DIR=/var/lib/gitea
WatchdogSec=30s
# If you install Git to directory prefix other than default PATH (which happens
# for example if you install other versions of Git side-to-side with
# distribution version), uncomment below line and add that prefix to PATH
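With `Type=notify` and `WatchdogSec=30s`, systemd expects readiness and keep-alive notifications from Gitea; after editing the unit, reload and restart for the change to take effect (assuming the unit is installed as `gitea.service`):
```shell
sudo systemctl daemon-reload    # re-read the edited unit file
sudo systemctl restart gitea    # restart under Type=notify with the watchdog armed
```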


@ -303,7 +303,7 @@ RUN_MODE = ; prod
LFS_JWT_SECRET =
;;
;; LFS authentication validity period (in time.Duration), pushes taking longer than this may fail.
;LFS_HTTP_AUTH_EXPIRY = 20m
;LFS_HTTP_AUTH_EXPIRY = 24h
;;
;; Maximum allowed LFS file size in bytes (Set to 0 for no limit).
;LFS_MAX_FILE_SIZE = 0
@ -940,6 +940,9 @@ ROUTER = console
;; Force ssh:// clone url instead of scp-style uri when default SSH port is used
;USE_COMPAT_SSH_URI = false
;;
;; Protocol the "go get" request uses for the returned repository URL, either https or ssh; default is https
;GO_GET_CLONE_URL_PROTOCOL = https
;;
;; Close issues as long as a commit on any branch marks it as fixed
;; Comma separated list of globally disabled repo units. Allowed values: repo.issues, repo.ext_issues, repo.pulls, repo.wiki, repo.ext_wiki, repo.projects, repo.packages, repo.actions.
;DISABLED_REPO_UNITS =
@ -1422,7 +1425,7 @@ ROUTER = console
;; Batch size to send for batched queues
;BATCH_LENGTH = 20
;;
;; Connection string for redis queues this will store the redis connection string.
;; Connection string for redis queues this will store the redis or redis-cluster connection string.
;; When `TYPE` is `persistable-channel`, this provides a directory for the underlying leveldb
;; or additional options of the form `leveldb://path/to/db?option=value&....`, and will override `DATADIR`.
;CONN_STR = "redis://127.0.0.1:6379/0"
@ -1694,8 +1697,9 @@ ROUTER = console
;; For "memory" only, GC interval in seconds, default is 60
;INTERVAL = 60
;;
;; For "redis" and "memcache", connection host address
;; For "redis", "redis-cluster" and "memcache", connection host address
;; redis: `redis://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s`
;; redis-cluster: `redis+cluster://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s`
;; memcache: `127.0.0.1:11211`
;; twoqueue: `{"size":50000,"recent_ratio":0.25,"ghost_ratio":0.5}` or `50000`
;HOST =
@ -1727,7 +1731,7 @@ ROUTER = console
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Either "memory", "file", "redis", "db", "mysql", "couchbase", "memcache" or "postgres"
;; Either "memory", "file", "redis", "redis-cluster", "db", "mysql", "couchbase", "memcache" or "postgres"
;; Default is "memory". "db" will reuse the configuration in [database]
;PROVIDER = memory
;;
@ -1735,6 +1739,7 @@ ROUTER = console
;; memory: doesn't have any config yet
;; file: session file path, e.g. `data/sessions`
;; redis: `redis://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s`
;; redis-cluster: `redis+cluster://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s`
;; mysql: go-sql-driver/mysql dsn config string, e.g. `root:password@/session_table`
;PROVIDER_CONFIG = data/sessions ; Relative paths will be made absolute against _`AppWorkPath`_.
;;
@ -1770,16 +1775,19 @@ ROUTER = console
;; Max Width and Height of uploaded avatars.
;; This is to limit the amount of RAM used when resizing the image.
;AVATAR_MAX_WIDTH = 4096
;AVATAR_MAX_HEIGHT = 3072
;AVATAR_MAX_HEIGHT = 4096
;;
;; The multiplication factor for rendered avatar images.
;; Larger values result in finer rendering on HiDPI devices.
;AVATAR_RENDERED_SIZE_FACTOR = 3
;AVATAR_RENDERED_SIZE_FACTOR = 2
;;
;; Maximum allowed file size for uploaded avatars.
;; This is to limit the amount of RAM used when resizing the image.
;AVATAR_MAX_FILE_SIZE = 1048576
;;
;; If the uploaded file is not larger than this byte size, the image will be used as is, without resizing/converting.
;AVATAR_MAX_ORIGIN_SIZE = 262144
;;
;; Chinese users can choose "duoshuo"
;; or a custom avatar source, like: http://cn.gravatar.com/avatar/
;GRAVATAR_SOURCE = gravatar
@ -2365,7 +2373,7 @@ ROUTER = console
;QUEUE_LENGTH = 1000
;;
;; Task queue connection string, available only when `QUEUE_TYPE` is `redis`.
;; If there is a password of redis, use `redis://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s`.
;; If there is a password of redis, use `redis://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s` or `redis+cluster://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s` for `redis-cluster`.
;QUEUE_CONN_STR = "redis://127.0.0.1:6379/0?pool_size=100&idle_timeout=180s"
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@ -2439,6 +2447,8 @@ ROUTER = console
;LIMIT_TOTAL_OWNER_COUNT = -1
;; Maximum size of packages a single owner can use (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
;LIMIT_TOTAL_OWNER_SIZE = -1
;; Maximum size of an Alpine upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
;LIMIT_SIZE_ALPINE = -1
;; Maximum size of a Cargo upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
;LIMIT_SIZE_CARGO = -1
;; Maximum size of a Chef upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
@ -2455,6 +2465,8 @@ ROUTER = console
;LIMIT_SIZE_DEBIAN = -1
;; Maximum size of a Generic upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
;LIMIT_SIZE_GENERIC = -1
;; Maximum size of a Go upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
;LIMIT_SIZE_GO = -1
;; Maximum size of a Helm upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
;LIMIT_SIZE_HELM = -1
;; Maximum size of a Maven upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)


@ -53,7 +53,7 @@ function doSearch() {
} else {
const para = document.createElement('P');
para.textContent = 'Please enter a word or phrase above';
document.getElementById('search-results').appendChild(para);
document.getElementById('search-results').append(para);
}
}
@ -85,7 +85,7 @@ function executeSearch(searchQuery) {
} else {
const para = document.createElement('P');
para.textContent = 'No matches found';
document.getElementById('search-results').appendChild(para);
document.getElementById('search-results').append(para);
}
});
}
@ -128,7 +128,7 @@ function populateResults(result) {
categories: value.item.categories,
snippet
});
document.getElementById('search-results').appendChild(htmlToElement(output));
document.getElementById('search-results').append(htmlToElement(output));
for (const snipvalue of snippetHighlights) {
new Mark(document.getElementById(`summary-${key}`)).mark(snipvalue);


@ -95,6 +95,8 @@ In addition there is _`StaticRootPath`_ which can be set as a built-in at build
HTTP protocol.
- `USE_COMPAT_SSH_URI`: **false**: Force ssh:// clone url instead of scp-style uri when
default SSH port is used.
- `GO_GET_CLONE_URL_PROTOCOL`: **https**: Protocol the "go get" request uses for the returned repository clone URL, either `https` or `ssh`;
default is `https`.
- `ACCESS_CONTROL_ALLOW_ORIGIN`: **\<empty\>**: Value for the Access-Control-Allow-Origin header,
not sent by default. **WARNING**: This may be harmful to your website if you do not
give it a correct value.
@ -364,7 +366,7 @@ The following configuration set `Content-Type: application/vnd.android.package-a
- `LFS_START_SERVER`: **false**: Enables Git LFS support.
- `LFS_CONTENT_PATH`: **%(APP_DATA_PATH)s/lfs**: Default LFS content path. (if it is on local storage.) **DEPRECATED** use settings in `[lfs]`.
- `LFS_JWT_SECRET`: **\<empty\>**: LFS authentication secret, change this to a unique string.
- `LFS_HTTP_AUTH_EXPIRY`: **20m**: LFS authentication validity period in time.Duration, pushes taking longer than this may fail.
- `LFS_HTTP_AUTH_EXPIRY`: **24h**: LFS authentication validity period in time.Duration, pushes taking longer than this may fail.
- `LFS_MAX_FILE_SIZE`: **0**: Maximum allowed LFS file size in bytes (Set to 0 for no limit).
- `LFS_LOCKS_PAGING_NUM`: **50**: Maximum number of LFS Locks returned per page.
@ -482,7 +484,7 @@ Configuration at `[queue]` will set defaults for queues with overrides for indiv
- `DATADIR`: **queues/common**: Base DataDir for storing level queues. `DATADIR` for individual queues can be set in `queue.name` sections. Relative paths will be made absolute against `%(APP_DATA_PATH)s`.
- `LENGTH`: **100**: Maximal queue size before channel queues block
- `BATCH_LENGTH`: **20**: Batch data before passing to the handler
- `CONN_STR`: **redis://127.0.0.1:6379/0**: Connection string for the redis queue type. Options can be set using query params. Similarly, LevelDB options can also be set using: **leveldb://relative/path?option=value** or **leveldb:///absolute/path?option=value**, and will override `DATADIR`
- `CONN_STR`: **redis://127.0.0.1:6379/0**: Connection string for the redis queue type. For `redis-cluster` use `redis+cluster://127.0.0.1:6379/0`. Options can be set using query params. Similarly, LevelDB options can also be set using: **leveldb://relative/path?option=value** or **leveldb:///absolute/path?option=value**, and will override `DATADIR`
- `QUEUE_NAME`: **_queue**: The suffix for default redis and disk queue name. Individual queues will default to **`name`**`QUEUE_NAME` but can be overridden in the specific `queue.name` section.
- `SET_NAME`: **_unique**: The suffix that will be added to the default redis and disk queue `set` name for unique queues. Individual queues will default to **`name`**`QUEUE_NAME`_`SET_NAME`_ but can be overridden in the specific `queue.name` section.
- `MAX_WORKERS`: **10**: Maximum number of worker go-routines for the queue.
@ -754,10 +756,11 @@ and
## Cache (`cache`)
- `ENABLED`: **true**: Enable the cache.
- `ADAPTER`: **memory**: Cache engine adapter, either `memory`, `redis`, `twoqueue` or `memcache`. (`twoqueue` represents a size limited LRU cache.)
- `ADAPTER`: **memory**: Cache engine adapter, either `memory`, `redis`, `redis-cluster`, `twoqueue` or `memcache`. (`twoqueue` represents a size limited LRU cache.)
- `INTERVAL`: **60**: Garbage Collection interval (sec), for memory and twoqueue cache only.
- `HOST`: **\<empty\>**: Connection string for `redis` and `memcache`. For `twoqueue` sets configuration for the queue.
- `HOST`: **\<empty\>**: Connection string for `redis`, `redis-cluster` and `memcache`. For `twoqueue` sets configuration for the queue.
- Redis: `redis://:macaron@127.0.0.1:6379/0?pool_size=100&idle_timeout=180s`
- Redis-cluster: `redis+cluster://:macaron@127.0.0.1:6379/0?pool_size=100&idle_timeout=180s`
- Memcache: `127.0.0.1:9090;127.0.0.1:9091`
- TwoQueue LRU cache: `{"size":50000,"recent_ratio":0.25,"ghost_ratio":0.5}` or `50000` representing the maximum number of objects stored in the cache.
- `ITEM_TTL`: **16h**: Time to keep items in cache if not used, Setting it to -1 disables caching.
@ -770,7 +773,7 @@ and
## Session (`session`)
- `PROVIDER`: **memory**: Session engine provider \[memory, file, redis, db, mysql, couchbase, memcache, postgres\]. Setting `db` will reuse the configuration in `[database]`
- `PROVIDER`: **memory**: Session engine provider \[memory, file, redis, redis-cluster, db, mysql, couchbase, memcache, postgres\]. Setting `db` will reuse the configuration in `[database]`
- `PROVIDER_CONFIG`: **data/sessions**: For file, the root path; for db, empty (database config will be used); for others, the connection string. Relative paths will be made absolute against _`AppWorkPath`_.
- `COOKIE_SECURE`: **false**: Enable this to force using HTTPS for all session access.
- `COOKIE_NAME`: **i\_like\_gitea**: The name of the cookie used for the session ID.
@ -790,9 +793,10 @@ and
- `AVATAR_STORAGE_TYPE`: **default**: Storage type defined in `[storage.xxx]`. Default is `default` which will read `[storage]` if no section `[storage]` will be a type `local`.
- `AVATAR_UPLOAD_PATH`: **data/avatars**: Path to store user avatar image files.
- `AVATAR_MAX_WIDTH`: **4096**: Maximum avatar image width in pixels.
- `AVATAR_MAX_HEIGHT`: **3072**: Maximum avatar image height in pixels.
- `AVATAR_MAX_FILE_SIZE`: **1048576** (1Mb): Maximum avatar image file size in bytes.
- `AVATAR_RENDERED_SIZE_FACTOR`: **3**: The multiplication factor for rendered avatar images. Larger values result in finer rendering on HiDPI devices.
- `AVATAR_MAX_HEIGHT`: **4096**: Maximum avatar image height in pixels.
- `AVATAR_MAX_FILE_SIZE`: **1048576** (1MiB): Maximum avatar image file size in bytes.
- `AVATAR_MAX_ORIGIN_SIZE`: **262144** (256KiB): If the uploaded file is not larger than this byte size, the image will be used as is, without resizing/converting.
- `AVATAR_RENDERED_SIZE_FACTOR`: **2**: The multiplication factor for rendered avatar images. Larger values result in finer rendering on HiDPI devices.
- `REPOSITORY_AVATAR_STORAGE_TYPE`: **default**: Storage type defined in `[storage.xxx]`. Default is `default` which will read `[storage]` if no section `[storage]` will be a type `local`.
- `REPOSITORY_AVATAR_UPLOAD_PATH`: **data/repo-avatars**: Path to store repository avatar image files.
@ -1016,7 +1020,7 @@ Default templates for project boards:
- `RUN_AT_START`: **false**: Run tasks at start up time (if ENABLED).
- `NOTICE_ON_SUCCESS`: **false**: Set to true to switch on success notices.
- `SCHEDULE`: **@every 168h**: Cron syntax to set how often to check.
- `OLDER_THAN`: **@every 8760h**: any action older than this expression will be deleted from database, suggest using `8760h` (1 year) because that's the max length of heatmap.
- `OLDER_THAN`: **8760h**: any action older than this expression will be deleted from database, suggest using `8760h` (1 year) because that's the max length of heatmap.
#### Cron - Check for new Gitea versions (`cron.update_checker`)
@ -1032,7 +1036,7 @@ Default templates for project boards:
- `RUN_AT_START`: **false**: Run tasks at start up time (if ENABLED).
- `NO_SUCCESS_NOTICE`: **false**: Set to true to switch off success notices.
- `SCHEDULE`: **@every 168h**: Cron syntax to set how often to check.
- `OLDER_THAN`: **@every 8760h**: any system notice older than this expression will be deleted from database.
- `OLDER_THAN`: **8760h**: any system notice older than this expression will be deleted from database.
#### Cron - Garbage collect LFS pointers in repositories (`cron.gc_lfs`)
@ -1181,7 +1185,7 @@ Task queue configuration has been moved to `queue.task`. However, the below conf
- `QUEUE_TYPE`: **channel**: Task queue type, could be `channel` or `redis`.
- `QUEUE_LENGTH`: **1000**: Task queue length, available only when `QUEUE_TYPE` is `channel`.
- `QUEUE_CONN_STR`: **redis://127.0.0.1:6379/0**: Task queue connection string, available only when `QUEUE_TYPE` is `redis`. If redis needs a password, use `redis://123@127.0.0.1:6379/0`.
- `QUEUE_CONN_STR`: **redis://127.0.0.1:6379/0**: Task queue connection string, available only when `QUEUE_TYPE` is `redis`. If redis needs a password, use `redis://123@127.0.0.1:6379/0` or `redis+cluster://123@127.0.0.1:6379/0`.
## Migrations (`migrations`)
@ -1211,6 +1215,7 @@ Task queue configuration has been moved to `queue.task`. However, the below conf
- `CHUNKED_UPLOAD_PATH`: **tmp/package-upload**: Path for chunked uploads. Defaults to `APP_DATA_PATH` + `tmp/package-upload`
- `LIMIT_TOTAL_OWNER_COUNT`: **-1**: Maximum count of package versions a single owner can have (`-1` means no limits)
- `LIMIT_TOTAL_OWNER_SIZE`: **-1**: Maximum size of packages a single owner can use (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_ALPINE`: **-1**: Maximum size of an Alpine upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_CARGO`: **-1**: Maximum size of a Cargo upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_CHEF`: **-1**: Maximum size of a Chef upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_COMPOSER`: **-1**: Maximum size of a Composer upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
@ -1219,6 +1224,7 @@ Task queue configuration has been moved to `queue.task`. However, the below conf
- `LIMIT_SIZE_CONTAINER`: **-1**: Maximum size of a Container upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_DEBIAN`: **-1**: Maximum size of a Debian upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_GENERIC`: **-1**: Maximum size of a Generic upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_GO`: **-1**: Maximum size of a Go upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_HELM`: **-1**: Maximum size of a Helm upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_MAVEN`: **-1**: Maximum size of a Maven upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_NPM`: **-1**: Maximum size of a npm upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)


@ -214,8 +214,8 @@ menu:
- `AVATAR_STORAGE_TYPE`: **local**: Avatar storage type, either `local` or `minio`, backed by the local filesystem or a minio-compatible API respectively.
- `AVATAR_UPLOAD_PATH`: **data/avatars**: Filesystem path for storing avatars.
- `AVATAR_MAX_WIDTH`: **4096**: Maximum avatar width, in pixels.
- `AVATAR_MAX_HEIGHT`: **3072**: Maximum avatar height, in pixels.
- `AVATAR_MAX_FILE_SIZE`: **1048576** (1Mb): Maximum avatar file size.
- `AVATAR_MAX_HEIGHT`: **4096**: Maximum avatar height, in pixels.
- `AVATAR_MAX_FILE_SIZE`: **1048576** (1MiB): Maximum avatar file size.
- `REPOSITORY_AVATAR_STORAGE_TYPE`: **local**: Repository avatar storage type, either `local` or `minio`, backed by the local filesystem or a minio-compatible API respectively.
- `REPOSITORY_AVATAR_UPLOAD_PATH`: **data/repo-avatars**: Path for storing repository avatars.


@ -25,12 +25,13 @@ menu:
If you want Nginx to serve your Gitea instance, add the following `server` section to the `http` section of `nginx.conf`:
```apacheconf
```
server {
listen 80;
server_name git.example.com;
location / {
client_max_body_size 512M;
proxy_pass http://localhost:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
@ -40,23 +41,32 @@ server {
}
```
### Resolving Error: 413 Request Entity Too Large
This error indicates that nginx is configured to restrict the file upload size.
It affects attachment uploading, form posting, package uploading, LFS pushing, etc.
You can fine-tune the `client_max_body_size` option according to the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size).
## Nginx with a sub-path
In case you already have a site, and you want Gitea to share the domain name, you can set up Nginx to serve Gitea under a sub-path by adding the following `server` section inside the `http` section of `nginx.conf`:
```apacheconf
```
server {
listen 80;
server_name git.example.com;
# Note: Trailing slash
location /git/ {
# Note: Trailing slash
proxy_pass http://localhost:3000/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
location /gitea/ {
client_max_body_size 512M;
# make nginx use unescaped URI, keep "%2F" as is
rewrite ^ $request_uri;
rewrite ^/gitea(/.*) $1 break;
proxy_pass http://127.0.0.1:3000$uri;
# other common HTTP headers, see the "Nginx" config section above
proxy_set_header ...
}
}
```
@ -132,14 +142,6 @@ server {
}
```
## Resolving Error: 413 Request Entity Too Large
This error indicates nginx is configured to restrict the file upload size.
In your nginx config file containing your Gitea proxy directive, find the `location { ... }` block for Gitea and add the line
`client_max_body_size 16M;` to set this limit to 16 megabytes or any other number of choice.
If you use Git LFS, this will also limit the size of the largest file you will be able to push.
## Apache HTTPD
If you want Apache HTTPD to serve your Gitea instance, you can add the following to your Apache HTTPD configuration (usually located at `/etc/apache2/httpd.conf` in Ubuntu):
@ -387,3 +389,13 @@ gitea:
This config assumes that you are handling HTTPS on the traefik side and using HTTP between Gitea and traefik.
Then you **MUST** set something like `[server] ROOT_URL = http://example.com/gitea/` correctly in your configuration.
## General sub-path configuration
Usually it's not recommended to put Gitea in a sub-path; it's not widely used and may have some issues in rare cases.
If you really need to do so, here are the requirements to make Gitea work with a sub-path (e.g. `http://example.com/gitea/`):
1. Set `[server] ROOT_URL = http://example.com/gitea/` in your `app.ini` file.
2. Make the reverse-proxy pass `http://example.com/gitea/foo` to `http://gitea-server:3000/foo`.
3. Make sure the reverse-proxy does not decode the URI; the request `http://example.com/gitea/a%2Fb` should be passed as `http://gitea-server:3000/a%2Fb` (see the quick check below).
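A quick way to verify point 3, assuming the example host above: request an escaped path through the proxy and confirm from Gitea's access log (or a plain 404, rather than a rewritten `a/b` path) that the URI arrived still encoded:
```shell
curl -i http://example.com/gitea/a%2Fb
```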


@ -168,7 +168,7 @@ make lint-backend
### Working with JS and CSS
Frontend development should follow [Guidelines for Frontend Development]({{ < 相关参考 "doc/development/guidelines-frontend.en-us.md" > }})
Frontend development should follow [Guidelines for Frontend Development]({{< relref "doc/contributing/guidelines-frontend.zh-cn.md" >}}).
To build with frontend assets, use the "watch-frontend" target mentioned above, or just build once.


@ -1,6 +1,6 @@
---
date: "2019-02-14T11:51:04+08:00"
title: "Gitea Compared Side-by-Side with Other Git Hosting Tools"
title: "Gitea Compared with Other Git Hosting Tools"
slug: "comparison"
weight: 5
toc: false
@ -15,7 +15,7 @@ menu:
identifier: "comparison"
---
# Gitea Compared Side-by-Side with Other Git Hosting Tools
# Gitea Compared with Other Git Hosting Tools
This page lists the similarities and differences between Gitea and some other Git hosting tools, to help you decide whether Gitea meets your needs.


@ -80,6 +80,7 @@ git --version
Create a user to run Gitea (e.g. `git`)
```sh
# On Ubuntu/Debian:
adduser \
--system \
--shell /bin/bash \
@ -88,6 +89,17 @@ adduser \
--disabled-password \
--home /home/git \
git
# On Fedora/RHEL/CentOS:
groupadd --system git
adduser \
--system \
--shell /bin/bash \
--comment 'Git Version Control' \
--gid git \
--home-dir /home/git \
--create-home \
git
```
### Create required directory structure


@ -74,6 +74,7 @@ git --version
Create a user to run Gitea (the name `git` is recommended)
```sh
# On Ubuntu/Debian:
adduser \
--system \
--shell /bin/bash \
@ -82,6 +83,17 @@ adduser \
--disabled-password \
--home /home/git \
git
# On Fedora/RHEL/CentOS:
groupadd --system git
adduser \
--system \
--shell /bin/bash \
--comment 'Git Version Control' \
--gid git \
--home-dir /home/git \
--create-home \
git
```
### Create required directory structure


@ -138,7 +138,7 @@ You may be confused about the runner labels, which will be explained later.
If you want to register the runner in a non-interactive way, you can use arguments to do it.
```bash
./act_runner register --no-interactive --instance <intance_url> --token <registration_token> --name <runner_name> --labels <runner_labels>
./act_runner register --no-interactive --instance <instance_url> --token <registration_token> --name <runner_name> --labels <runner_labels>
```
When you have registered the runner, you can find a new file named `.runner` in the current directory.


@ -132,7 +132,7 @@ The missing host will be filled with `https://gitea.com` if you don't configure
That means `uses: actions/checkout@v3` will download the action from [gitea.com/actions/checkout](https://gitea.com/actions/checkout), instead of [github.com/actions/checkout](https://github.com/actions/checkout).
As mentioned, it's configurable.
If you want your runners to download actions from GitHub or your own Gitea instance by default, you can configure it by setting `[actions].DEFAULT_ACTIONS_URL`. See [Configuration Cheat Sheet](({{ < relref "doc/administration/config-cheat-sheet.en-us.md#actions-actions" > }})).
If you want your runners to download actions from GitHub or your own Gitea instance by default, you can configure it by setting `[actions].DEFAULT_ACTIONS_URL`. See [Configuration Cheat Sheet]({{< relref "doc/administration/config-cheat-sheet.en-us.md#actions-actions" >}}).
### Context availability


@ -24,7 +24,7 @@ This page will guide you through the process of using Gitea Actions.
## Set up Gitea
First of all, you need a Gitea instance.
You can follow the [documentation]({{ < relref "doc/installation/from-package.en-us.md" > }}) to set up a new instance or upgrade your existing one.
You can follow the [documentation]({{< relref "doc/installation/from-package.en-us.md" >}}) to set up a new instance or upgrade your existing one.
It doesn't matter how you install or run Gitea, as long as its version is 1.19.0 or higher.
Actions are disabled by default, so you need to add the following to the configuration file to enable it:
@ -34,7 +34,7 @@ Actions are disabled by default, so you need to add the following to the configu
ENABLED=true
```
If you want to learn more or encounter any problems while configuring it, please refer to the [Configuration Cheat Sheet]({{ < relref "doc/administration/config-cheat-sheet.en-us.md#actions-actions" > }}).
If you want to learn more or encounter any problems while configuring it, please refer to the [Configuration Cheat Sheet]({{< relref "doc/administration/config-cheat-sheet.en-us.md#actions-actions" >}}).
### Set up runner
@ -85,7 +85,7 @@ And you can see the new runner in the management page:
![view runner](/images/usage/actions/view-runner.png)
You can find more information by visiting [Act runner]({{ < relref "doc/actions/act-runner.en-us.md" > }}).
You can find more information by visiting [Act runner]({{< relref "doc/usage/actions/act-runner.en-us.md" >}}).
### Use Actions
@ -119,7 +119,7 @@ jobs:
- name: List files in the repository
run: |
ls ${{ gitea.workspace }}
- run: echo "🍏 This job's status is ${{ gitea.status }}."
- run: echo "🍏 This job's status is ${{ job.status }}."
```
You can upload it as a file with the extension `.yaml` in the directory `.gitea/workflows/` of the repository, for example `.gitea/workflows/demo.yaml`.
@ -129,7 +129,7 @@ That is because Gitea Actions is designed to be compatible with GitHub Actions
Be careful, the demo file contains some emojis.
Please make sure your database supports them, especially when using MySQL.
If the charset is not `utf8mb4`, errors will occur, such as `Error 1366 (HY000): Incorrect string value: '\\xF0\\x9F\\x8E\\x89 T...' for column 'name' at row 1`.
See [Database Preparation]( {{ < relref "doc/installation/database-preparation.en-us.md#mysql" > }}) for more information.
See [Database Preparation]({{< relref "doc/installation/database-preparation.en-us.md#mysql" >}}) for more information.
Alternatively, you can remove all emojis from the demo file and try again.
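If you are unsure about the charset, one way to inspect it on a MySQL server (a hypothetical local connection; adjust credentials as needed):
```shell
mysql -u root -p -e "SHOW VARIABLES LIKE 'character_set_server'"
```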


@ -0,0 +1,133 @@
---
date: "2023-03-25T00:00:00+00:00"
title: "Alpine Packages Repository"
slug: "packages/alpine"
draft: false
toc: false
menu:
sidebar:
parent: "packages"
name: "Alpine"
weight: 4
identifier: "alpine"
---
# Alpine Packages Repository
Publish [Alpine](https://pkgs.alpinelinux.org/) packages for your user or organization.
**Table of Contents**
{{< toc >}}
## Requirements
To work with the Alpine registry, you need to use an HTTP client like `curl` to upload packages and a package manager like `apk` to consume them.
The following examples use `apk`.
## Configuring the package registry
To register the Alpine registry, add the URL to the list of known apk sources (`/etc/apk/repositories`):
```
https://gitea.example.com/api/packages/{owner}/alpine/<branch>/<repository>
```
| Placeholder | Description |
| ------------ | ----------- |
| `owner` | The owner of the packages. |
| `branch` | The branch to use. |
| `repository` | The repository to use. |
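For example, with the placeholders filled in (owner `testuser`, branch `v3.17`, repository `main`, matching the publish example below):
```shell
echo "https://gitea.example.com/api/packages/testuser/alpine/v3.17/main" >> /etc/apk/repositories
```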
If the registry is private, provide credentials in the URL. You can use a password or a [personal access token]({{< relref "doc/development/api-usage.en-us.md#authentication" >}}):
```
https://{username}:{your_password_or_token}@gitea.example.com/api/packages/{owner}/alpine/<branch>/<repository>
```
The Alpine registry files are signed with an RSA key which must be known to apk. Download the public key and store it in `/etc/apk/keys/`:
```shell
curl -JO https://gitea.example.com/api/packages/{owner}/alpine/key
```
Afterwards, update the local package index:
```shell
apk update
```
## Publish a package
To publish an Alpine package (`*.apk`), perform an HTTP `PUT` operation with the package content in the request body.
```
PUT https://gitea.example.com/api/packages/{owner}/alpine/{branch}/{repository}
```
| Parameter | Description |
| ------------ | ----------- |
| `owner` | The owner of the package. |
| `branch` | The branch may match the release version of the OS, e.g. `v3.17`. |
| `repository` | The repository can be used [to group packages](https://wiki.alpinelinux.org/wiki/Repositories) or just `main` or similar. |
Example request using HTTP Basic authentication:
```shell
curl --user your_username:your_password_or_token \
--upload-file path/to/file.apk \
https://gitea.example.com/api/packages/testuser/alpine/v3.17/main
```
If you are using 2FA or OAuth use a [personal access token]({{< relref "doc/development/api-usage.en-us.md#authentication" >}}) instead of the password.
You cannot publish a file with the same name twice to a package. You must delete the existing package file first.
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |
| `201 Created` | The package has been published. |
| `400 Bad Request` | The package name, version, branch, repository or architecture are invalid. |
| `409 Conflict` | A package file with the same combination of parameters already exists in the package. |
## Delete a package
To delete an Alpine package, perform an HTTP `DELETE` operation. This will delete the package version too if there is no file left.
```
DELETE https://gitea.example.com/api/packages/{owner}/alpine/{branch}/{repository}/{architecture}/{filename}
```
| Parameter | Description |
| -------------- | ----------- |
| `owner` | The owner of the package. |
| `branch` | The branch to use. |
| `repository` | The repository to use. |
| `architecture` | The package architecture. |
| `filename` | The file to delete. |
Example request using HTTP Basic authentication:
```shell
curl --user your_username:your_token_or_password -X DELETE \
https://gitea.example.com/api/packages/testuser/alpine/v3.17/main/test-package-1.0.0.apk
```
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |
| `204 No Content` | Success |
| `404 Not Found` | The package or file was not found. |
## Install a package
To install a package from the Alpine registry, execute the following commands:
```shell
# use latest version
apk add {package_name}
# use specific version
apk add {package_name}={package_version}
```


@ -83,7 +83,7 @@ curl --user your_username:your_password_or_token \
If you are using 2FA or OAuth use a [personal access token]({{< relref "doc/development/api-usage.en-us.md#authentication" >}}) instead of the password.
You cannot publish a file with the same name twice to a package. You must delete the existing package version first.
The server reponds with the following HTTP Status codes.
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |
@ -115,7 +115,7 @@ curl --user your_username:your_token_or_password -X DELETE \
https://gitea.example.com/api/packages/testuser/debian/pools/bionic/main/test-package/1.0.0/amd64
```
The server reponds with the following HTTP Status codes.
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |


@ -51,7 +51,7 @@ curl --user your_username:your_password_or_token \
If you are using 2FA or OAuth use a [personal access token]({{< relref "doc/development/api-usage.en-us.md#authentication" >}}) instead of the password.
The server reponds with the following HTTP Status codes.
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |
@ -83,7 +83,7 @@ curl --user your_username:your_token_or_password \
https://gitea.example.com/api/packages/testuser/generic/test_package/1.0.0/file.bin
```
The server reponds with the following HTTP Status codes.
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |
@ -111,7 +111,7 @@ curl --user your_username:your_token_or_password -X DELETE \
https://gitea.example.com/api/packages/testuser/generic/test_package/1.0.0
```
The server reponds with the following HTTP Status codes.
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |
@ -140,7 +140,7 @@ curl --user your_username:your_token_or_password -X DELETE \
https://gitea.example.com/api/packages/testuser/generic/test_package/1.0.0/file.bin
```
The server reponds with the following HTTP Status codes.
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |

View File

@ -0,0 +1,77 @@
---
date: "2023-05-10T00:00:00+00:00"
title: "Go Packages Repository"
slug: "go"
weight: 45
draft: false
toc: false
menu:
sidebar:
parent: "packages"
name: "Go"
weight: 45
identifier: "go"
---
# Go Packages Repository
Publish Go packages for your user or organization.
**Table of Contents**
{{< toc >}}
## Publish a package
To publish a Go package, perform an HTTP `PUT` operation with the package content in the request body.
You cannot publish a package if a package of the same name and version already exists. You must delete the existing package first.
The package must follow the [documented structure](https://go.dev/ref/mod#zip-files).
```
PUT https://gitea.example.com/api/packages/{owner}/go/upload
```
| Parameter | Description |
| --------- | ----------- |
| `owner` | The owner of the package. |
To authenticate to the package registry, you need to provide [custom HTTP headers or use HTTP Basic authentication]({{< relref "doc/development/api-usage.en-us.md#authentication" >}}):
```shell
curl --user your_username:your_password_or_token \
--upload-file path/to/file.zip \
https://gitea.example.com/api/packages/testuser/go/upload
```
If you are using 2FA or OAuth use a [personal access token]({{< relref "doc/development/api-usage.en-us.md#authentication" >}}) instead of the password.
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |
| `201 Created` | The package has been published. |
| `400 Bad Request` | The package is invalid. |
| `409 Conflict` | A package with the same name and version already exists. |
## Install a package
To install a Go package, instruct Go to use the package registry as a proxy:
```shell
# use latest version
GOPROXY=https://gitea.example.com/api/packages/{owner}/go go install {package_name}
# or
GOPROXY=https://gitea.example.com/api/packages/{owner}/go go install {package_name}@latest
# use specific version
GOPROXY=https://gitea.example.com/api/packages/{owner}/go go install {package_name}@{package_version}
```
| Parameter | Description |
| ----------------- | ----------- |
| `owner` | The owner of the package. |
| `package_name` | The package name. |
| `package_version` | The package version. |
If the package owner's visibility is private, you need to [provide credentials](https://go.dev/ref/mod#private-module-proxy-auth).
More information about the `GOPROXY` environment variable and how to protect against data leaks can be found in [the documentation](https://go.dev/ref/mod#private-modules).
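As a concrete sketch, installing a hypothetical module `example.com/mymodule` owned by `testuser` could look like this (module path and version are placeholders, not real packages):

```shell
# hypothetical module path and version for illustration
GOPROXY=https://gitea.example.com/api/packages/testuser/go go install example.com/mymodule@v1.0.0
```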

View File

@ -27,6 +27,7 @@ The following package managers are currently supported:
| Name | Language | Package client |
| ---- | -------- | -------------- |
| [Alpine]({{< relref "doc/usage/packages/alpine.en-us.md" >}}) | - | `apk` |
| [Cargo]({{< relref "doc/usage/packages/cargo.en-us.md" >}}) | Rust | `cargo` |
| [Chef]({{< relref "doc/usage/packages/chef.en-us.md" >}}) | - | `knife` |
| [Composer]({{< relref "doc/usage/packages/composer.en-us.md" >}}) | PHP | `composer` |
@ -35,6 +36,7 @@ The following package managers are currently supported:
| [Container]({{< relref "doc/usage/packages/container.en-us.md" >}}) | - | any OCI compliant client |
| [Debian]({{< relref "doc/usage/packages/debian.en-us.md" >}}) | - | `apt` |
| [Generic]({{< relref "doc/usage/packages/generic.en-us.md" >}}) | - | any HTTP client |
| [Go]({{< relref "doc/usage/packages/go.en-us.md" >}}) | Go | `go` |
| [Helm]({{< relref "doc/usage/packages/helm.en-us.md" >}}) | - | any HTTP client, `cm-push` |
| [Maven]({{< relref "doc/usage/packages/maven.en-us.md" >}}) | Java | `mvn`, `gradle` |
| [npm]({{< relref "doc/usage/packages/npm.en-us.md" >}}) | JavaScript | `npm`, `yarn`, `pnpm` |

View File

@ -69,7 +69,7 @@ curl --user your_username:your_password_or_token \
If you are using 2FA or OAuth use a [personal access token]({{< relref "doc/development/api-usage.en-us.md#authentication" >}}) instead of the password.
You cannot publish a file with the same name twice to a package. You must delete the existing package version first.
The server reponds with the following HTTP Status codes.
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |
@ -99,7 +99,7 @@ curl --user your_username:your_token_or_password -X DELETE \
https://gitea.example.com/api/packages/testuser/rpm/test-package/1.0.0/x86_64
```
The server reponds with the following HTTP Status codes.
The server responds with the following HTTP Status codes.
| HTTP Status Code | Meaning |
| ----------------- | ------- |

View File

@ -9,7 +9,7 @@ menu:
sidebar:
parent: "packages"
name: "Storage"
weight: 5
weight: 2
identifier: "storage"
---

View File

@ -15,7 +15,7 @@ menu:
# Swift Packages Repository
Publish [Swift](hhttps://www.swift.org/) packages for your user or organization.
Publish [Swift](https://www.swift.org/) packages for your user or organization.
**Table of Contents**

View File

@ -0,0 +1,20 @@
---
date: "2023-03-02T21:00:00+05:00"
title: "Usage: Gitea Profile READMEs"
slug: "profile-readme"
weight: 12
toc: false
draft: false
menu:
sidebar:
parent: "usage"
name: "Gitea Profile READMEs"
weight: 12
identifier: "profile-readme"
---
# Gitea Profile READMEs
To display a Markdown file on your Gitea profile page, create a repository named ".profile" and edit the README.md file inside it. Gitea automatically pulls this file in and displays it above your repositories.

Note: You are welcome to make this repository private. Doing so hides your source files from public view and lets you keep certain files private. However, the README.md file will be the only file present on your profile. If you wish to have an entirely private .profile repository, remove or rename the README.md file.
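A minimal sketch of setting this up from the command line, assuming the `.profile` repository has already been created and using a hypothetical repository URL:

```shell
# clone the (empty) .profile repository, add a README.md and push it
git clone https://gitea.example.com/testuser/.profile.git
cd .profile
echo "# Hello, I am testuser" > README.md
git add README.md
git commit -m "Add profile README"
git push
```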

View File

@ -54,7 +54,7 @@ Push to create is a feature that allows you to push to a repository that does no
## Enabling Push To Create
In the `app.ini` file, set `ENABLE_PUSH_CREATE_USER` to `true` and `ENABLE_PUSH_CREATE_ORG` to `true` if you want to allow users to create repositories in their own user account and in organizations they are a member of respectively. Restart Gitea for the changes to take effect. You can read more about these two options in the [Configuration Cheat Sheet]({{ < relref "doc/administration/config-cheat-sheet.zh-tw.md#repository-repository" > }}).
In the `app.ini` file, set `ENABLE_PUSH_CREATE_USER` to `true` and `ENABLE_PUSH_CREATE_ORG` to `true` if you want to allow users to create repositories in their own user account and in organizations they are a member of respectively. Restart Gitea for the changes to take effect. You can read more about these two options in the [Configuration Cheat Sheet]({{< relref "doc/administration/config-cheat-sheet.zh-tw.md#repository-repository" >}}).
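A minimal sketch of the relevant `app.ini` section (the section name follows the Configuration Cheat Sheet; adjust to your setup):

```ini
[repository]
ENABLE_PUSH_CREATE_USER = true
ENABLE_PUSH_CREATE_ORG = true
```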
## Using Push To Create

View File

@ -10,7 +10,6 @@ import (
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
)
@ -45,6 +44,9 @@ func (runs RunList) LoadTriggerUser(ctx context.Context) error {
run.TriggerUser = user_model.NewActionsUser()
} else {
run.TriggerUser = users[run.TriggerUserID]
if run.TriggerUser == nil {
run.TriggerUser = user_model.NewGhostUser()
}
}
}
return nil
@ -66,7 +68,6 @@ type FindRunOptions struct {
db.ListOptions
RepoID int64
OwnerID int64
IsClosed util.OptionalBool
WorkflowFileName string
TriggerUserID int64
Approved bool // not util.OptionalBool, it works only when it's true
@ -80,14 +81,6 @@ func (opts FindRunOptions) toConds() builder.Cond {
if opts.OwnerID > 0 {
cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
}
if opts.IsClosed.IsFalse() {
cond = cond.And(builder.Eq{"status": StatusWaiting}.Or(
builder.Eq{"status": StatusRunning}))
} else if opts.IsClosed.IsTrue() {
cond = cond.And(
builder.Neq{"status": StatusWaiting}.And(
builder.Neq{"status": StatusRunning}))
}
if opts.WorkflowFileName != "" {
cond = cond.And(builder.Eq{"workflow_id": opts.WorkflowFileName})
}

View File

@ -11,6 +11,7 @@ import (
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/shared/types"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/translation"
@ -28,7 +29,7 @@ type ActionRunner struct {
Version string `xorm:"VARCHAR(64)"`
OwnerID int64 `xorm:"index"` // org level runner, 0 means system
Owner *user_model.User `xorm:"-"`
RepoID int64 `xorm:"index"` // repo level runner, if orgid also is zero, then it's a global
RepoID int64 `xorm:"index"` // repo level runner, if OwnerID also is zero, then it's a global
Repo *repo_model.Repository `xorm:"-"`
Description string `xorm:"TEXT"`
Base int // 0 native 1 docker 2 virtual machine
@ -52,14 +53,25 @@ type ActionRunner struct {
Deleted timeutil.TimeStamp `xorm:"deleted"`
}
func (r *ActionRunner) OwnType() string {
// BelongsToOwnerName returns the name of the owner the runner belongs to; callers should guarantee that all attributes are loaded
func (r *ActionRunner) BelongsToOwnerName() string {
if r.RepoID != 0 {
return fmt.Sprintf("Repo(%s)", r.Repo.FullName())
return r.Repo.FullName()
}
if r.OwnerID != 0 {
return fmt.Sprintf("Org(%s)", r.Owner.Name)
return r.Owner.Name
}
return "Global"
return ""
}
func (r *ActionRunner) BelongsToOwnerType() types.OwnerType {
if r.RepoID != 0 {
return types.OwnerTypeRepository
}
if r.OwnerID != 0 {
return types.OwnerTypeOrganization
}
return types.OwnerTypeSystemGlobal
}
func (r *ActionRunner) Status() runnerv1.RunnerStatus {

View File

@ -291,7 +291,7 @@ func CreateTaskForRunner(ctx context.Context, runner *ActionRunner) (*ActionTask
}
task.LogFilename = logFileName(job.Run.Repo.FullName(), task.ID)
if _, err := e.ID(task.ID).Cols("log_filename").Update(task); err != nil {
if err := UpdateTask(ctx, task, "log_filename"); err != nil {
return nil, false, err
}
@ -367,9 +367,18 @@ func UpdateTaskByState(ctx context.Context, state *runnerv1.TaskState) (*ActionT
return nil, util.ErrNotExist
}
if task.Status.IsDone() {
// the state is final, do nothing
return task, nil
}
// if state.Result is not unspecified, the task is finished
if state.Result != runnerv1.Result_RESULT_UNSPECIFIED {
task.Status = Status(state.Result)
task.Stopped = timeutil.TimeStamp(state.StoppedAt.AsTime().Unix())
if err := UpdateTask(ctx, task, "status", "stopped"); err != nil {
return nil, err
}
if _, err := UpdateRunJob(ctx, &ActionRunJob{
ID: task.JobID,
Status: task.Status,
@ -379,10 +388,6 @@ func UpdateTaskByState(ctx context.Context, state *runnerv1.TaskState) (*ActionT
}
}
if _, err := e.ID(task.ID).Update(task); err != nil {
return nil, err
}
if err := task.LoadAttributes(ctx); err != nil {
return nil, err
}
@ -440,7 +445,7 @@ func StopTask(ctx context.Context, taskID int64, status Status) error {
return err
}
if _, err := e.ID(task.ID).Update(task); err != nil {
if err := UpdateTask(ctx, task, "status", "stopped"); err != nil {
return err
}

View File

@ -494,12 +494,27 @@ func activityQueryCondition(opts GetFeedsOptions) (builder.Cond, error) {
).From("`user`"),
))
} else if !opts.Actor.IsAdmin {
cond = cond.And(builder.In("act_user_id",
builder.Select("`user`.id").Where(
builder.Eq{"keep_activity_private": false}.
And(builder.In("visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited))).
Or(builder.Eq{"id": opts.Actor.ID}).From("`user`"),
))
uidCond := builder.Select("`user`.id").From("`user`").Where(
builder.Eq{"keep_activity_private": false}.
And(builder.In("visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited))).
Or(builder.Eq{"id": opts.Actor.ID})
if opts.RequestedUser != nil {
if opts.RequestedUser.IsOrganization() {
// An organization can always see the activities whose `act_user_id` is the same as its id.
uidCond = uidCond.Or(builder.Eq{"id": opts.RequestedUser.ID})
} else {
// A user can always see the activities of the organizations to which the user belongs.
uidCond = uidCond.Or(
builder.Eq{"type": user_model.UserTypeOrganization}.
And(builder.In("`user`.id", builder.Select("org_id").
Where(builder.Eq{"uid": opts.RequestedUser.ID}).
From("team_user"))),
)
}
}
cond = cond.And(builder.In("act_user_id", uidCond))
}
// check readable repositories by doer/actor

View File

@ -17,8 +17,6 @@ import (
"code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
)
// Task represents a task
@ -35,7 +33,7 @@ type Task struct {
StartTime timeutil.TimeStamp
EndTime timeutil.TimeStamp
PayloadContent string `xorm:"TEXT"`
Message string `xorm:"TEXT"` // if task failed, saved the error reason
Message string `xorm:"TEXT"` // if task failed, saved the error reason, it could be a JSON string of TranslatableMessage or a plain message
Created timeutil.TimeStamp `xorm:"created"`
}
@ -185,14 +183,6 @@ func GetMigratingTask(repoID int64) (*Task, error) {
return &task, nil
}
// HasFinishedMigratingTask returns if a finished migration task exists for the repo.
func HasFinishedMigratingTask(repoID int64) (bool, error) {
return db.GetEngine(db.DefaultContext).
Where("repo_id=? AND type=? AND status=?", repoID, structs.TaskTypeMigrateRepo, structs.TaskStatusFinished).
Table("task").
Exist()
}
// GetMigratingTaskByID returns the migrating task by repo's id
func GetMigratingTaskByID(id, doerID int64) (*Task, *migration.MigrateOptions, error) {
task := Task{
@ -214,27 +204,6 @@ func GetMigratingTaskByID(id, doerID int64) (*Task, *migration.MigrateOptions, e
return &task, &opts, nil
}
// FindTaskOptions find all tasks
type FindTaskOptions struct {
Status int
}
// ToConds generates conditions for database operation.
func (opts FindTaskOptions) ToConds() builder.Cond {
cond := builder.NewCond()
if opts.Status >= 0 {
cond = cond.And(builder.Eq{"status": opts.Status})
}
return cond
}
// FindTasks find all tasks
func FindTasks(opts FindTaskOptions) ([]*Task, error) {
tasks := make([]*Task, 0, 10)
err := db.GetEngine(db.DefaultContext).Where(opts.ToConds()).Find(&tasks)
return tasks, err
}
// CreateTask creates a task on database
func CreateTask(task *Task) error {
return db.Insert(db.DefaultContext, task)

View File

@ -108,3 +108,31 @@
is_prerelease: false
is_tag: false
created_unix: 946684803
- id: 9
repo_id: 57
publisher_id: 2
tag_name: "non-existing-target-branch"
lower_tag_name: "non-existing-target-branch"
target: "non-existing"
title: "non-existing-target-branch"
sha1: "cef06e48f2642cd0dc9597b4bea09f4b3f74aad6"
num_commits: 5
is_draft: false
is_prerelease: false
is_tag: false
created_unix: 946684803
- id: 10
repo_id: 57
publisher_id: 2
tag_name: "empty-target-branch"
lower_tag_name: "empty-target-branch"
target: ""
title: "empty-target-branch"
sha1: "cef06e48f2642cd0dc9597b4bea09f4b3f74aad6"
num_commits: 5
is_draft: false
is_prerelease: false
is_tag: false
created_unix: 946684803

View File

@ -23,6 +23,7 @@ import (
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
"xorm.io/xorm"
)
@ -240,6 +241,55 @@ func GetLatestCommitStatus(ctx context.Context, repoID int64, sha string, listOp
return statuses, count, db.GetEngine(ctx).In("id", ids).Find(&statuses)
}
// GetLatestCommitStatusForPairs returns all statuses with a unique context for a given list of repo-sha pairs
func GetLatestCommitStatusForPairs(ctx context.Context, repoIDsToLatestCommitSHAs map[int64]string, listOptions db.ListOptions) (map[int64][]*CommitStatus, error) {
type result struct {
ID int64
RepoID int64
}
results := make([]result, 0, len(repoIDsToLatestCommitSHAs))
sess := db.GetEngine(ctx).Table(&CommitStatus{})
// Create a disjunction of conditions for each repoID and SHA pair
conds := make([]builder.Cond, 0, len(repoIDsToLatestCommitSHAs))
for repoID, sha := range repoIDsToLatestCommitSHAs {
conds = append(conds, builder.Eq{"repo_id": repoID, "sha": sha})
}
sess = sess.Where(builder.Or(conds...)).
Select("max( id ) as id, repo_id").
GroupBy("context_hash, repo_id").OrderBy("max( id ) desc")
sess = db.SetSessionPagination(sess, &listOptions)
err := sess.Find(&results)
if err != nil {
return nil, err
}
ids := make([]int64, 0, len(results))
repoStatuses := make(map[int64][]*CommitStatus)
for _, result := range results {
ids = append(ids, result.ID)
}
statuses := make([]*CommitStatus, 0, len(ids))
if len(ids) > 0 {
err = db.GetEngine(ctx).In("id", ids).Find(&statuses)
if err != nil {
return nil, err
}
// Group the statuses by repo ID
for _, status := range statuses {
repoStatuses[status.RepoID] = append(repoStatuses[status.RepoID], status)
}
}
return repoStatuses, nil
}
// FindRepoRecentCommitStatusContexts returns repository's recent commit status contexts
func FindRepoRecentCommitStatusContexts(ctx context.Context, repoID int64, before time.Duration) ([]string, error) {
start := timeutil.TimeStampNow().AddDuration(-before)

View File

@ -161,33 +161,6 @@ func (l *Label) BelongsToRepo() bool {
return l.RepoID > 0
}
// Get color as RGB values in 0..255 range
func (l *Label) ColorRGB() (float64, float64, float64, error) {
color, err := strconv.ParseUint(l.Color[1:], 16, 64)
if err != nil {
return 0, 0, 0, err
}
r := float64(uint8(0xFF & (uint32(color) >> 16)))
g := float64(uint8(0xFF & (uint32(color) >> 8)))
b := float64(uint8(0xFF & uint32(color)))
return r, g, b, nil
}
// Determine if label text should be light or dark to be readable on background color
func (l *Label) UseLightTextColor() bool {
if strings.HasPrefix(l.Color, "#") {
if r, g, b, err := l.ColorRGB(); err == nil {
// Perceived brightness from: https://www.w3.org/TR/AERT/#color-contrast
// In the future WCAG 3 APCA may be a better solution
brightness := (0.299*r + 0.587*g + 0.114*b) / 255
return brightness < 0.35
}
}
return false
}
// Return scope substring of label name, or empty string if none exists
func (l *Label) ExclusiveScope() string {
if !l.Exclusive {

View File

@ -22,15 +22,6 @@ func TestLabel_CalOpenIssues(t *testing.T) {
assert.EqualValues(t, 2, label.NumOpenIssues)
}
func TestLabel_TextColor(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
label := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
assert.False(t, label.UseLightTextColor())
label = unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 2})
assert.True(t, label.UseLightTextColor())
}
func TestLabel_ExclusiveScope(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
label := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 7})

View File

@ -189,6 +189,20 @@ func (r *Review) LoadAttributes(ctx context.Context) (err error) {
return err
}
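// HTMLTypeColorName returns the name of the color used to render this review type in templates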
func (r *Review) HTMLTypeColorName() string {
switch r.Type {
case ReviewTypeApprove:
return "green"
case ReviewTypeComment:
return "grey"
case ReviewTypeReject:
return "red"
case ReviewTypeRequest:
return "yellow"
}
return "grey"
}
// GetReviewByID returns the review by the given ID
func GetReviewByID(ctx context.Context, id int64) (*Review, error) {
review := new(Review)

View File

@ -0,0 +1,53 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package alpine
import (
"context"
packages_model "code.gitea.io/gitea/models/packages"
alpine_module "code.gitea.io/gitea/modules/packages/alpine"
)
// GetBranches gets all available branches
func GetBranches(ctx context.Context, ownerID int64) ([]string, error) {
return packages_model.GetDistinctPropertyValues(
ctx,
packages_model.TypeAlpine,
ownerID,
packages_model.PropertyTypeFile,
alpine_module.PropertyBranch,
nil,
)
}
// GetRepositories gets all available repositories for the given branch
func GetRepositories(ctx context.Context, ownerID int64, branch string) ([]string, error) {
return packages_model.GetDistinctPropertyValues(
ctx,
packages_model.TypeAlpine,
ownerID,
packages_model.PropertyTypeFile,
alpine_module.PropertyRepository,
&packages_model.DistinctPropertyDependency{
Name: alpine_module.PropertyBranch,
Value: branch,
},
)
}
// GetArchitectures gets all available architectures for the given repository
func GetArchitectures(ctx context.Context, ownerID int64, repository string) ([]string, error) {
return packages_model.GetDistinctPropertyValues(
ctx,
packages_model.TypeAlpine,
ownerID,
packages_model.PropertyTypeFile,
alpine_module.PropertyArchitecture,
&packages_model.DistinctPropertyDependency{
Name: alpine_module.PropertyRepository,
Value: repository,
},
)
}

View File

@ -88,44 +88,42 @@ func SearchLatestPackages(ctx context.Context, opts *PackageSearchOptions) ([]*p
// GetDistributions gets all available distributions
func GetDistributions(ctx context.Context, ownerID int64) ([]string, error) {
return getDistinctPropertyValues(ctx, ownerID, "", debian_module.PropertyDistribution)
return packages.GetDistinctPropertyValues(
ctx,
packages.TypeDebian,
ownerID,
packages.PropertyTypeFile,
debian_module.PropertyDistribution,
nil,
)
}
// GetComponents gets all available components for the given distribution
func GetComponents(ctx context.Context, ownerID int64, distribution string) ([]string, error) {
return getDistinctPropertyValues(ctx, ownerID, distribution, debian_module.PropertyComponent)
return packages.GetDistinctPropertyValues(
ctx,
packages.TypeDebian,
ownerID,
packages.PropertyTypeFile,
debian_module.PropertyComponent,
&packages.DistinctPropertyDependency{
Name: debian_module.PropertyDistribution,
Value: distribution,
},
)
}
// GetArchitectures gets all available architectures for the given distribution
func GetArchitectures(ctx context.Context, ownerID int64, distribution string) ([]string, error) {
return getDistinctPropertyValues(ctx, ownerID, distribution, debian_module.PropertyArchitecture)
}
func getDistinctPropertyValues(ctx context.Context, ownerID int64, distribution, propName string) ([]string, error) {
var cond builder.Cond = builder.Eq{
"package_property.ref_type": packages.PropertyTypeFile,
"package_property.name": propName,
"package.type": packages.TypeDebian,
"package.owner_id": ownerID,
}
if distribution != "" {
innerCond := builder.
Expr("pp.ref_id = package_property.ref_id").
And(builder.Eq{
"pp.ref_type": packages.PropertyTypeFile,
"pp.name": debian_module.PropertyDistribution,
"pp.value": distribution,
})
cond = cond.And(builder.Exists(builder.Select("pp.ref_id").From("package_property pp").Where(innerCond)))
}
values := make([]string, 0, 5)
return values, db.GetEngine(ctx).
Table("package_property").
Distinct("package_property.value").
Join("INNER", "package_file", "package_file.id = package_property.ref_id").
Join("INNER", "package_version", "package_version.id = package_file.version_id").
Join("INNER", "package", "package.id = package_version.package_id").
Where(cond).
Find(&values)
return packages.GetDistinctPropertyValues(
ctx,
packages.TypeDebian,
ownerID,
packages.PropertyTypeFile,
debian_module.PropertyArchitecture,
&packages.DistinctPropertyDependency{
Name: debian_module.PropertyDistribution,
Value: distribution,
},
)
}

View File

@ -12,6 +12,7 @@ import (
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/packages/alpine"
"code.gitea.io/gitea/modules/packages/cargo"
"code.gitea.io/gitea/modules/packages/chef"
"code.gitea.io/gitea/modules/packages/composer"
@ -136,6 +137,8 @@ func GetPackageDescriptor(ctx context.Context, pv *PackageVersion) (*PackageDesc
var metadata interface{}
switch p.Type {
case TypeAlpine:
metadata = &alpine.VersionMetadata{}
case TypeCargo:
metadata = &cargo.Metadata{}
case TypeChef:
@ -152,6 +155,8 @@ func GetPackageDescriptor(ctx context.Context, pv *PackageVersion) (*PackageDesc
metadata = &debian.Metadata{}
case TypeGeneric:
// generic packages have no metadata
case TypeGo:
// go packages have no metadata
case TypeHelm:
metadata = &helm.Metadata{}
case TypeNuGet:

View File

@ -30,6 +30,7 @@ type Type string
// List of supported packages
const (
TypeAlpine Type = "alpine"
TypeCargo Type = "cargo"
TypeChef Type = "chef"
TypeComposer Type = "composer"
@ -38,6 +39,7 @@ const (
TypeContainer Type = "container"
TypeDebian Type = "debian"
TypeGeneric Type = "generic"
TypeGo Type = "go"
TypeHelm Type = "helm"
TypeMaven Type = "maven"
TypeNpm Type = "npm"
@ -51,6 +53,7 @@ const (
)
var TypeList = []Type{
TypeAlpine,
TypeCargo,
TypeChef,
TypeComposer,
@ -59,6 +62,7 @@ var TypeList = []Type{
TypeContainer,
TypeDebian,
TypeGeneric,
TypeGo,
TypeHelm,
TypeMaven,
TypeNpm,
@ -74,6 +78,8 @@ var TypeList = []Type{
// Name gets the name of the package type
func (pt Type) Name() string {
switch pt {
case TypeAlpine:
return "Alpine"
case TypeCargo:
return "Cargo"
case TypeChef:
@ -90,6 +96,8 @@ func (pt Type) Name() string {
return "Debian"
case TypeGeneric:
return "Generic"
case TypeGo:
return "Go"
case TypeHelm:
return "Helm"
case TypeMaven:
@ -117,6 +125,8 @@ func (pt Type) Name() string {
// SVGName gets the name of the package type svg image
func (pt Type) SVGName() string {
switch pt {
case TypeAlpine:
return "gitea-alpine"
case TypeCargo:
return "gitea-cargo"
case TypeChef:
@ -133,6 +143,8 @@ func (pt Type) SVGName() string {
return "gitea-debian"
case TypeGeneric:
return "octicon-package"
case TypeGo:
return "gitea-go"
case TypeHelm:
return "gitea-helm"
case TypeMaven:

View File

@ -7,6 +7,8 @@ import (
"context"
"code.gitea.io/gitea/models/db"
"xorm.io/builder"
)
func init() {
@ -81,3 +83,39 @@ func DeletePropertyByName(ctx context.Context, refType PropertyType, refID int64
_, err := db.GetEngine(ctx).Where("ref_type = ? AND ref_id = ? AND name = ?", refType, refID, name).Delete(&PackageProperty{})
return err
}
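// DistinctPropertyDependency restricts a distinct property value search to
// packages that also carry the given property name/value pair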
type DistinctPropertyDependency struct {
Name string
Value string
}
// GetDistinctPropertyValues returns all distinct property values for a given type.
// Optionally, the search can be restricted to values that depend on another property.
func GetDistinctPropertyValues(ctx context.Context, packageType Type, ownerID int64, refType PropertyType, propertyName string, dep *DistinctPropertyDependency) ([]string, error) {
var cond builder.Cond = builder.Eq{
"package_property.ref_type": refType,
"package_property.name": propertyName,
"package.type": packageType,
"package.owner_id": ownerID,
}
if dep != nil {
innerCond := builder.
Expr("pp.ref_id = package_property.ref_id").
And(builder.Eq{
"pp.ref_type": refType,
"pp.name": dep.Name,
"pp.value": dep.Value,
})
cond = cond.And(builder.Exists(builder.Select("pp.ref_id").From("package_property pp").Where(innerCond)))
}
values := make([]string, 0, 5)
return values, db.GetEngine(ctx).
Table("package_property").
Distinct("package_property.value").
Join("INNER", "package_file", "package_file.id = package_property.ref_id").
Join("INNER", "package_version", "package_version.id = package_file.version_id").
Join("INNER", "package", "package.id = package_version.package_id").
Where(cond).
Find(&values)
}

View File

@ -72,6 +72,7 @@ type Release struct {
OriginalAuthorID int64 `xorm:"index"`
LowerTagName string
Target string
TargetBehind string `xorm:"-"` // to handle non-existing or empty target
Title string
Sha1 string `xorm:"VARCHAR(40)"`
NumCommits int64

View File

@ -149,8 +149,8 @@ type Repository struct {
IsEmpty bool `xorm:"INDEX"`
IsArchived bool `xorm:"INDEX"`
IsMirror bool `xorm:"INDEX"`
*Mirror `xorm:"-"`
Status RepositoryStatus `xorm:"NOT NULL DEFAULT 0"`
Status RepositoryStatus `xorm:"NOT NULL DEFAULT 0"`
RenderingMetas map[string]string `xorm:"-"`
DocumentRenderingMetas map[string]string `xorm:"-"`
@ -553,16 +553,9 @@ func ComposeHTTPSCloneURL(owner, repo string) string {
return fmt.Sprintf("%s%s/%s.git", setting.AppURL, url.PathEscape(owner), url.PathEscape(repo))
}
func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
repoName := repo.Name
if isWiki {
repoName += ".wiki"
}
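// ComposeSSHCloneURL returns the SSH clone URL for the given repository, honoring the configured SSH user, domain and port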
func ComposeSSHCloneURL(ownerName, repoName string) string {
sshUser := setting.SSH.User
cl := new(CloneLink)
// if we have a ipv6 literal we need to put brackets around it
// for the git cloning to work.
sshDomain := setting.SSH.Domain
@ -572,12 +565,25 @@ func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
}
if setting.SSH.Port != 22 {
cl.SSH = fmt.Sprintf("ssh://%s@%s/%s/%s.git", sshUser, net.JoinHostPort(setting.SSH.Domain, strconv.Itoa(setting.SSH.Port)), url.PathEscape(repo.OwnerName), url.PathEscape(repoName))
} else if setting.Repository.UseCompatSSHURI {
cl.SSH = fmt.Sprintf("ssh://%s@%s/%s/%s.git", sshUser, sshDomain, url.PathEscape(repo.OwnerName), url.PathEscape(repoName))
} else {
cl.SSH = fmt.Sprintf("%s@%s:%s/%s.git", sshUser, sshDomain, url.PathEscape(repo.OwnerName), url.PathEscape(repoName))
return fmt.Sprintf("ssh://%s@%s/%s/%s.git", sshUser,
net.JoinHostPort(setting.SSH.Domain, strconv.Itoa(setting.SSH.Port)),
url.PathEscape(ownerName),
url.PathEscape(repoName))
}
if setting.Repository.UseCompatSSHURI {
return fmt.Sprintf("ssh://%s@%s/%s/%s.git", sshUser, sshDomain, url.PathEscape(ownerName), url.PathEscape(repoName))
}
return fmt.Sprintf("%s@%s:%s/%s.git", sshUser, sshDomain, url.PathEscape(ownerName), url.PathEscape(repoName))
}
func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
repoName := repo.Name
if isWiki {
repoName += ".wiki"
}
cl := new(CloneLink)
cl.SSH = ComposeSSHCloneURL(repo.OwnerName, repoName)
cl.HTTPS = ComposeHTTPSCloneURL(repo.OwnerName, repoName)
return cl
}

View File

@ -0,0 +1,29 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package types
import "code.gitea.io/gitea/modules/translation"
type OwnerType string
const (
OwnerTypeSystemGlobal = "system-global"
OwnerTypeIndividual = "individual"
OwnerTypeRepository = "repository"
OwnerTypeOrganization = "organization"
)
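// LocaleString returns the translated, human-readable name of the owner type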
func (o OwnerType) LocaleString(locale translation.Locale) string {
switch o {
case OwnerTypeSystemGlobal:
return locale.Tr("concept_system_global")
case OwnerTypeIndividual:
return locale.Tr("concept_user_individual")
case OwnerTypeRepository:
return locale.Tr("concept_code_repository")
case OwnerTypeOrganization:
return locale.Tr("concept_user_organization")
}
return locale.Tr("unknown")
}

View File

@ -5,13 +5,14 @@ package avatar
import (
"bytes"
"errors"
"fmt"
"image"
"image/color"
"image/png"
_ "image/gif" // for processing gif images
_ "image/jpeg" // for processing jpeg images
_ "image/png" // for processing png images
"code.gitea.io/gitea/modules/avatar/identicon"
"code.gitea.io/gitea/modules/setting"
@ -22,8 +23,11 @@ import (
_ "golang.org/x/image/webp" // for processing webp images
)
// AvatarSize returns avatar's size
const AvatarSize = 290
// DefaultAvatarSize is the target CSS pixel size for avatar generation. It is
// multiplied by setting.Avatar.RenderedSizeFactor and the resulting size is the
// usual size of avatar image saved on server, unless the original file is smaller
// than the size after resizing.
const DefaultAvatarSize = 256
// RandomImageSize generates and returns a random avatar image unique to input data
// in custom size (height and width).
@ -39,28 +43,44 @@ func RandomImageSize(size int, data []byte) (image.Image, error) {
// RandomImage generates and returns a random avatar image unique to input data
// in default size (height and width).
func RandomImage(data []byte) (image.Image, error) {
return RandomImageSize(AvatarSize, data)
return RandomImageSize(DefaultAvatarSize*setting.Avatar.RenderedSizeFactor, data)
}
// Prepare accepts a byte slice as input, validates it contains an image of an
// acceptable format, and crops and resizes it appropriately.
func Prepare(data []byte) (*image.Image, error) {
imgCfg, _, err := image.DecodeConfig(bytes.NewReader(data))
// processAvatarImage processes the avatar image data, cropping and resizing it if necessary.
// The returned data could be the original image if no processing is needed.
func processAvatarImage(data []byte, maxOriginSize int64) ([]byte, error) {
imgCfg, imgType, err := image.DecodeConfig(bytes.NewReader(data))
if err != nil {
return nil, fmt.Errorf("DecodeConfig: %w", err)
return nil, fmt.Errorf("image.DecodeConfig: %w", err)
}
// for safety, only accept known types explicitly
if imgType != "png" && imgType != "jpeg" && imgType != "gif" && imgType != "webp" {
return nil, errors.New("unsupported avatar image type")
}
// do not process image which is too large, it would consume too much memory
if imgCfg.Width > setting.Avatar.MaxWidth {
return nil, fmt.Errorf("Image width is too large: %d > %d", imgCfg.Width, setting.Avatar.MaxWidth)
return nil, fmt.Errorf("image width is too large: %d > %d", imgCfg.Width, setting.Avatar.MaxWidth)
}
if imgCfg.Height > setting.Avatar.MaxHeight {
return nil, fmt.Errorf("Image height is too large: %d > %d", imgCfg.Height, setting.Avatar.MaxHeight)
return nil, fmt.Errorf("image height is too large: %d > %d", imgCfg.Height, setting.Avatar.MaxHeight)
}
// If the origin is small enough, just use it, then APNG could be supported,
// otherwise, if the image is processed later, APNG loses animation.
// And one more thing, webp is not fully supported, for animated webp, image.DecodeConfig works but Decode fails.
// So for animated webp, if the uploaded file is smaller than maxOriginSize, it will be used, if it's larger, there will be an error.
if len(data) < int(maxOriginSize) {
return data, nil
}
img, _, err := image.Decode(bytes.NewReader(data))
if err != nil {
return nil, fmt.Errorf("Decode: %w", err)
return nil, fmt.Errorf("image.Decode: %w", err)
}
// try to crop and resize the origin image if necessary
if imgCfg.Width != imgCfg.Height {
var newSize, ax, ay int
if imgCfg.Width > imgCfg.Height {
@ -74,13 +94,33 @@ func Prepare(data []byte) (*image.Image, error) {
img, err = cutter.Crop(img, cutter.Config{
Width: newSize,
Height: newSize,
Anchor: image.Point{ax, ay},
Anchor: image.Point{X: ax, Y: ay},
})
if err != nil {
return nil, err
}
}
img = resize.Resize(AvatarSize, AvatarSize, img, resize.Bilinear)
return &img, nil
targetSize := uint(DefaultAvatarSize * setting.Avatar.RenderedSizeFactor)
img = resize.Resize(targetSize, targetSize, img, resize.Bilinear)
// try to encode the cropped/resized image to png
bs := bytes.Buffer{}
if err = png.Encode(&bs, img); err != nil {
return nil, err
}
resized := bs.Bytes()
// usually the png compression is not good enough, use the original image (no cropping/resizing) if the origin is smaller
if len(data) <= len(resized) {
return data, nil
}
return resized, nil
}
// ProcessAvatarImage processes the avatar image data, cropping and resizing it if necessary.
// The returned data could be the original image if no processing is needed.
func ProcessAvatarImage(data []byte) ([]byte, error) {
return processAvatarImage(data, setting.Avatar.MaxOriginSize)
}

View File

@ -4,6 +4,9 @@
package avatar
import (
"bytes"
"image"
"image/png"
"os"
"testing"
@ -25,49 +28,109 @@ func Test_RandomImage(t *testing.T) {
assert.NoError(t, err)
}
func Test_PrepareWithPNG(t *testing.T) {
func Test_ProcessAvatarPNG(t *testing.T) {
setting.Avatar.MaxWidth = 4096
setting.Avatar.MaxHeight = 4096
data, err := os.ReadFile("testdata/avatar.png")
assert.NoError(t, err)
imgPtr, err := Prepare(data)
_, err = processAvatarImage(data, 262144)
assert.NoError(t, err)
assert.Equal(t, 290, (*imgPtr).Bounds().Max.X)
assert.Equal(t, 290, (*imgPtr).Bounds().Max.Y)
}
func Test_PrepareWithJPEG(t *testing.T) {
func Test_ProcessAvatarJPEG(t *testing.T) {
setting.Avatar.MaxWidth = 4096
setting.Avatar.MaxHeight = 4096
data, err := os.ReadFile("testdata/avatar.jpeg")
assert.NoError(t, err)
imgPtr, err := Prepare(data)
_, err = processAvatarImage(data, 262144)
assert.NoError(t, err)
assert.Equal(t, 290, (*imgPtr).Bounds().Max.X)
assert.Equal(t, 290, (*imgPtr).Bounds().Max.Y)
}
func Test_PrepareWithInvalidImage(t *testing.T) {
func Test_ProcessAvatarInvalidData(t *testing.T) {
setting.Avatar.MaxWidth = 5
setting.Avatar.MaxHeight = 5
_, err := Prepare([]byte{})
assert.EqualError(t, err, "DecodeConfig: image: unknown format")
_, err := processAvatarImage([]byte{}, 12800)
assert.EqualError(t, err, "image.DecodeConfig: image: unknown format")
}
func Test_PrepareWithInvalidImageSize(t *testing.T) {
func Test_ProcessAvatarInvalidImageSize(t *testing.T) {
setting.Avatar.MaxWidth = 5
setting.Avatar.MaxHeight = 5
data, err := os.ReadFile("testdata/avatar.png")
assert.NoError(t, err)
_, err = Prepare(data)
assert.EqualError(t, err, "Image width is too large: 10 > 5")
_, err = processAvatarImage(data, 12800)
assert.EqualError(t, err, "image width is too large: 10 > 5")
}
func Test_ProcessAvatarImage(t *testing.T) {
setting.Avatar.MaxWidth = 4096
setting.Avatar.MaxHeight = 4096
scaledSize := DefaultAvatarSize * setting.Avatar.RenderedSizeFactor
newImgData := func(size int, optHeight ...int) []byte {
width := size
height := size
if len(optHeight) == 1 {
height = optHeight[0]
}
img := image.NewRGBA(image.Rect(0, 0, width, height))
bs := bytes.Buffer{}
err := png.Encode(&bs, img)
assert.NoError(t, err)
return bs.Bytes()
}
// if origin image canvas is too large, crop and resize it
origin := newImgData(500, 600)
result, err := processAvatarImage(origin, 0)
assert.NoError(t, err)
assert.NotEqual(t, origin, result)
decoded, err := png.Decode(bytes.NewReader(result))
assert.NoError(t, err)
assert.EqualValues(t, scaledSize, decoded.Bounds().Max.X)
assert.EqualValues(t, scaledSize, decoded.Bounds().Max.Y)
// if origin image is smaller than the default size, use the origin image
origin = newImgData(1)
result, err = processAvatarImage(origin, 0)
assert.NoError(t, err)
assert.Equal(t, origin, result)
// use the origin image if the origin is smaller
origin = newImgData(scaledSize + 100)
result, err = processAvatarImage(origin, 0)
assert.NoError(t, err)
assert.Less(t, len(result), len(origin))
// still use the origin image if the origin doesn't exceed the max-origin-size
origin = newImgData(scaledSize + 100)
result, err = processAvatarImage(origin, 262144)
assert.NoError(t, err)
assert.Equal(t, origin, result)
// allow to use known image format (eg: webp) if it is small enough
origin, err = os.ReadFile("testdata/animated.webp")
assert.NoError(t, err)
result, err = processAvatarImage(origin, 262144)
assert.NoError(t, err)
assert.Equal(t, origin, result)
// do not support unknown image formats, eg: SVG may contain embedded JS
origin = []byte("<svg></svg>")
_, err = processAvatarImage(origin, 262144)
assert.ErrorContains(t, err, "image: unknown format")
// make sure the canvas size limit works
setting.Avatar.MaxWidth = 5
setting.Avatar.MaxHeight = 5
origin = newImgData(10)
_, err = processAvatarImage(origin, 262144)
assert.ErrorContains(t, err, "image width is too large: 10 > 5")
}

BIN
modules/avatar/testdata/animated.webp vendored Normal file

Binary file not shown. (4.8 KiB)

View File

@ -36,19 +36,20 @@ type Render interface {
// Context represents context of a request.
type Context struct {
Resp ResponseWriter
Req *http.Request
Resp ResponseWriter
Req *http.Request
Render Render
Data middleware.ContextData // data used by MVC templates
PageData map[string]any // data used by JavaScript modules in one page, it's `window.config.pageData`
Render Render
Locale translation.Locale
Cache cache.Cache
Csrf CSRFProtector
Flash *middleware.Flash
Session session.Store
Link string // current request URL
EscapedLink string
Locale translation.Locale
Cache cache.Cache
Csrf CSRFProtector
Flash *middleware.Flash
Session session.Store
Link string // current request URL (without query string)
Doer *user_model.User
IsSigned bool
IsBasicAuth bool

View File

@ -6,7 +6,6 @@ package context
import (
"encoding/hex"
"net/http"
"strconv"
"strings"
"code.gitea.io/gitea/modules/setting"
@ -85,21 +84,3 @@ func (ctx *Context) CookieEncrypt(secret, value string) string {
return hex.EncodeToString(text)
}
// GetCookieInt returns cookie result in int type.
func (ctx *Context) GetCookieInt(name string) int {
r, _ := strconv.Atoi(ctx.GetSiteCookie(name))
return r
}
// GetCookieInt64 returns cookie result in int64 type.
func (ctx *Context) GetCookieInt64(name string) int64 {
r, _ := strconv.ParseInt(ctx.GetSiteCookie(name), 10, 64)
return r
}
// GetCookieFloat64 returns cookie result in float64 type.
func (ctx *Context) GetCookieFloat64(name string) float64 {
v, _ := strconv.ParseFloat(ctx.GetSiteCookie(name), 64)
return v
}

View File

@ -4,14 +4,7 @@
package context
import (
"path"
"strings"
"code.gitea.io/gitea/models/unit"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/issue/template"
"code.gitea.io/gitea/modules/log"
api "code.gitea.io/gitea/modules/structs"
)
// IsUserSiteAdmin returns true if current user is a site admin
@ -19,11 +12,6 @@ func (ctx *Context) IsUserSiteAdmin() bool {
return ctx.IsSigned && ctx.Doer.IsAdmin
}
// IsUserRepoOwner returns true if current user owns current repo
func (ctx *Context) IsUserRepoOwner() bool {
return ctx.Repo.IsOwner()
}
// IsUserRepoAdmin returns true if current user is admin in current repo
func (ctx *Context) IsUserRepoAdmin() bool {
return ctx.Repo.IsAdmin()
@ -39,100 +27,3 @@ func (ctx *Context) IsUserRepoWriter(unitTypes []unit.Type) bool {
return false
}
// IsUserRepoReaderSpecific returns true if current user can read current repo's specific part
func (ctx *Context) IsUserRepoReaderSpecific(unitType unit.Type) bool {
return ctx.Repo.CanRead(unitType)
}
// IsUserRepoReaderAny returns true if current user can read any part of current repo
func (ctx *Context) IsUserRepoReaderAny() bool {
return ctx.Repo.HasAccess()
}
// IssueTemplatesFromDefaultBranch checks for valid issue templates in the repo's default branch,
func (ctx *Context) IssueTemplatesFromDefaultBranch() []*api.IssueTemplate {
ret, _ := ctx.IssueTemplatesErrorsFromDefaultBranch()
return ret
}
// IssueTemplatesErrorsFromDefaultBranch checks for issue templates in the repo's default branch,
// returns valid templates and the errors of invalid template files.
func (ctx *Context) IssueTemplatesErrorsFromDefaultBranch() ([]*api.IssueTemplate, map[string]error) {
var issueTemplates []*api.IssueTemplate
if ctx.Repo.Repository.IsEmpty {
return issueTemplates, nil
}
if ctx.Repo.Commit == nil {
var err error
ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetBranchCommit(ctx.Repo.Repository.DefaultBranch)
if err != nil {
return issueTemplates, nil
}
}
invalidFiles := map[string]error{}
for _, dirName := range IssueTemplateDirCandidates {
tree, err := ctx.Repo.Commit.SubTree(dirName)
if err != nil {
log.Debug("get sub tree of %s: %v", dirName, err)
continue
}
entries, err := tree.ListEntries()
if err != nil {
log.Debug("list entries in %s: %v", dirName, err)
return issueTemplates, nil
}
for _, entry := range entries {
if !template.CouldBe(entry.Name()) {
continue
}
fullName := path.Join(dirName, entry.Name())
if it, err := template.UnmarshalFromEntry(entry, dirName); err != nil {
invalidFiles[fullName] = err
} else {
if !strings.HasPrefix(it.Ref, "refs/") { // Assume that the ref intended is always a branch - for tags users should use refs/tags/<ref>
it.Ref = git.BranchPrefix + it.Ref
}
issueTemplates = append(issueTemplates, it)
}
}
}
return issueTemplates, invalidFiles
}
// IssueConfigFromDefaultBranch returns the issue config for this repo.
// It never returns a nil config.
func (ctx *Context) IssueConfigFromDefaultBranch() (api.IssueConfig, error) {
if ctx.Repo.Repository.IsEmpty {
return GetDefaultIssueConfig(), nil
}
commit, err := ctx.Repo.GitRepo.GetBranchCommit(ctx.Repo.Repository.DefaultBranch)
if err != nil {
return GetDefaultIssueConfig(), err
}
for _, configName := range IssueConfigCandidates {
if _, err := commit.GetTreeEntryByPath(configName + ".yaml"); err == nil {
return ctx.Repo.GetIssueConfig(configName+".yaml", commit)
}
if _, err := commit.GetTreeEntryByPath(configName + ".yml"); err == nil {
return ctx.Repo.GetIssueConfig(configName+".yml", commit)
}
}
return GetDefaultIssueConfig(), nil
}
func (ctx *Context) HasIssueTemplatesOrContactLinks() bool {
if len(ctx.IssueTemplatesFromDefaultBranch()) > 0 {
return true
}
issueConfig, _ := ctx.IssueConfigFromDefaultBranch()
return len(issueConfig.ContactLinks) > 0
}

View File

@ -4,71 +4,20 @@
package context
import (
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"code.gitea.io/gitea/modules/httpcache"
"code.gitea.io/gitea/modules/typesniffer"
"code.gitea.io/gitea/modules/httplib"
)
type ServeHeaderOptions struct {
ContentType string // defaults to "application/octet-stream"
ContentTypeCharset string
ContentLength *int64
Disposition string // defaults to "attachment"
Filename string
CacheDuration time.Duration // defaults to 5 minutes
LastModified time.Time
}
type ServeHeaderOptions httplib.ServeHeaderOptions
// SetServeHeaders sets necessary content serve headers
func (ctx *Context) SetServeHeaders(opts *ServeHeaderOptions) {
header := ctx.Resp.Header()
contentType := typesniffer.ApplicationOctetStream
if opts.ContentType != "" {
if opts.ContentTypeCharset != "" {
contentType = opts.ContentType + "; charset=" + strings.ToLower(opts.ContentTypeCharset)
} else {
contentType = opts.ContentType
}
}
header.Set("Content-Type", contentType)
header.Set("X-Content-Type-Options", "nosniff")
if opts.ContentLength != nil {
header.Set("Content-Length", strconv.FormatInt(*opts.ContentLength, 10))
}
if opts.Filename != "" {
disposition := opts.Disposition
if disposition == "" {
disposition = "attachment"
}
backslashEscapedName := strings.ReplaceAll(strings.ReplaceAll(opts.Filename, `\`, `\\`), `"`, `\"`) // \ -> \\, " -> \"
header.Set("Content-Disposition", fmt.Sprintf(`%s; filename="%s"; filename*=UTF-8''%s`, disposition, backslashEscapedName, url.PathEscape(opts.Filename)))
header.Set("Access-Control-Expose-Headers", "Content-Disposition")
}
duration := opts.CacheDuration
if duration == 0 {
duration = 5 * time.Minute
}
httpcache.SetCacheControlInHeader(header, duration)
if !opts.LastModified.IsZero() {
header.Set("Last-Modified", opts.LastModified.UTC().Format(http.TimeFormat))
}
func (ctx *Context) SetServeHeaders(opt *ServeHeaderOptions) {
httplib.ServeSetHeaders(ctx.Resp, (*httplib.ServeHeaderOptions)(opt))
}
// ServeContent serves content to http request
func (ctx *Context) ServeContent(r io.ReadSeeker, opts *ServeHeaderOptions) {
ctx.SetServeHeaders(opts)
httplib.ServeSetHeaders(ctx.Resp, (*httplib.ServeHeaderOptions)(opts))
http.ServeContent(ctx.Resp, ctx.Req, opts.Filename, opts.LastModified, r)
}

View File

@ -5,6 +5,7 @@ package context
import (
"net/http"
"net/http/httptest"
"testing"
"code.gitea.io/gitea/modules/setting"
@ -12,27 +13,10 @@ import (
"github.com/stretchr/testify/assert"
)
type mockResponseWriter struct {
header http.Header
}
func (m *mockResponseWriter) Header() http.Header {
return m.header
}
func (m *mockResponseWriter) Write(bytes []byte) (int, error) {
panic("implement me")
}
func (m *mockResponseWriter) WriteHeader(statusCode int) {
panic("implement me")
}
func TestRemoveSessionCookieHeader(t *testing.T) {
w := &mockResponseWriter{}
w.header = http.Header{}
w.header.Add("Set-Cookie", (&http.Cookie{Name: setting.SessionConfig.CookieName, Value: "foo"}).String())
w.header.Add("Set-Cookie", (&http.Cookie{Name: "other", Value: "bar"}).String())
w := httptest.NewRecorder()
w.Header().Add("Set-Cookie", (&http.Cookie{Name: setting.SessionConfig.CookieName, Value: "foo"}).String())
w.Header().Add("Set-Cookie", (&http.Cookie{Name: "other", Value: "bar"}).String())
assert.Len(t, w.Header().Values("Set-Cookie"), 2)
removeSessionCookieHeader(w)
assert.Len(t, w.Header().Values("Set-Cookie"), 1)

View File

@ -8,7 +8,6 @@ import (
"context"
"fmt"
"html"
"io"
"net/http"
"net/url"
"path"
@ -28,33 +27,12 @@ import (
"code.gitea.io/gitea/modules/log"
repo_module "code.gitea.io/gitea/modules/repository"
"code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/util"
asymkey_service "code.gitea.io/gitea/services/asymkey"
"github.com/editorconfig/editorconfig-core-go/v2"
"gopkg.in/yaml.v3"
)
// IssueTemplateDirCandidates issue templates directory
var IssueTemplateDirCandidates = []string{
"ISSUE_TEMPLATE",
"issue_template",
".gitea/ISSUE_TEMPLATE",
".gitea/issue_template",
".github/ISSUE_TEMPLATE",
".github/issue_template",
".gitlab/ISSUE_TEMPLATE",
".gitlab/issue_template",
}
var IssueConfigCandidates = []string{
".gitea/ISSUE_TEMPLATE/config",
".gitea/issue_template/config",
".github/ISSUE_TEMPLATE/config",
".github/issue_template/config",
}
// PullRequest contains information to make a pull request
type PullRequest struct {
BaseRepo *repo_model.Repository
@ -83,7 +61,6 @@ type Repository struct {
RepoLink string
CloneLink repo_model.CloneLink
CommitsCount int64
Mirror *repo_model.Mirror
PullRequest *PullRequest
}
@ -341,7 +318,14 @@ func EarlyResponseForGoGetMeta(ctx *Context) {
ctx.PlainText(http.StatusBadRequest, "invalid repository path")
return
}
goImportContent := fmt.Sprintf("%s git %s", ComposeGoGetImport(username, reponame), repo_model.ComposeHTTPSCloneURL(username, reponame))
var cloneURL string
if setting.Repository.GoGetCloneURLProtocol == "ssh" {
cloneURL = repo_model.ComposeSSHCloneURL(username, reponame)
} else {
cloneURL = repo_model.ComposeHTTPSCloneURL(username, reponame)
}
goImportContent := fmt.Sprintf("%s git %s", ComposeGoGetImport(username, reponame), cloneURL)
htmlMeta := fmt.Sprintf(`<meta name="go-import" content="%s">`, html.EscapeString(goImportContent))
ctx.PlainText(http.StatusOK, htmlMeta)
}
@ -395,13 +379,9 @@ func repoAssignment(ctx *Context, repo *repo_model.Repository) {
ctx.Data["Permission"] = &ctx.Repo.Permission
if repo.IsMirror {
ctx.Repo.Mirror, err = repo_model.GetMirrorByRepoID(ctx, repo.ID)
pullMirror, err := repo_model.GetMirrorByRepoID(ctx, repo.ID)
if err == nil {
ctx.Repo.Mirror.Repo = repo
ctx.Data["IsPullMirror"] = true
ctx.Data["MirrorEnablePrune"] = ctx.Repo.Mirror.EnablePrune
ctx.Data["MirrorInterval"] = ctx.Repo.Mirror.Interval
ctx.Data["Mirror"] = ctx.Repo.Mirror
ctx.Data["PullMirror"] = pullMirror
} else if err != repo_model.ErrMirrorNotExist {
ctx.ServerError("GetMirrorByRepoID", err)
return
@ -1062,74 +1042,3 @@ func UnitTypes() func(ctx *Context) {
ctx.Data["UnitTypeActions"] = unit_model.TypeActions
}
}
func GetDefaultIssueConfig() api.IssueConfig {
return api.IssueConfig{
BlankIssuesEnabled: true,
ContactLinks: make([]api.IssueConfigContactLink, 0),
}
}
// GetIssueConfig loads the given issue config file.
// It never returns a nil config.
func (r *Repository) GetIssueConfig(path string, commit *git.Commit) (api.IssueConfig, error) {
if r.GitRepo == nil {
return GetDefaultIssueConfig(), nil
}
var err error
treeEntry, err := commit.GetTreeEntryByPath(path)
if err != nil {
return GetDefaultIssueConfig(), err
}
reader, err := treeEntry.Blob().DataAsync()
if err != nil {
log.Debug("DataAsync: %v", err)
return GetDefaultIssueConfig(), nil
}
defer reader.Close()
configContent, err := io.ReadAll(reader)
if err != nil {
return GetDefaultIssueConfig(), err
}
issueConfig := api.IssueConfig{}
if err := yaml.Unmarshal(configContent, &issueConfig); err != nil {
return GetDefaultIssueConfig(), err
}
for pos, link := range issueConfig.ContactLinks {
if link.Name == "" {
return GetDefaultIssueConfig(), fmt.Errorf("contact_link at position %d is missing name key", pos+1)
}
if link.URL == "" {
return GetDefaultIssueConfig(), fmt.Errorf("contact_link at position %d is missing url key", pos+1)
}
if link.About == "" {
return GetDefaultIssueConfig(), fmt.Errorf("contact_link at position %d is missing about key", pos+1)
}
_, err = url.ParseRequestURI(link.URL)
if err != nil {
return GetDefaultIssueConfig(), fmt.Errorf("%s is not a valid URL", link.URL)
}
}
return issueConfig, nil
}
// IsIssueConfig returns if the given path is a issue config file.
func (r *Repository) IsIssueConfig(path string) bool {
for _, configName := range IssueConfigCandidates {
if path == configName+".yaml" || path == configName+".yml" {
return true
}
}
return false
}

View File

@ -106,6 +106,17 @@ func GetBranchesByPath(ctx context.Context, path string, skip, limit int) ([]*Br
return gitRepo.GetBranches(skip, limit)
}
// GetBranchCommitID returns a branch commit ID by its name
func GetBranchCommitID(ctx context.Context, path, branch string) (string, error) {
gitRepo, err := OpenRepository(ctx, path)
if err != nil {
return "", err
}
defer gitRepo.Close()
return gitRepo.GetBranchCommitID(branch)
}
// GetBranches returns a slice of *git.Branch
func (repo *Repository) GetBranches(skip, limit int) ([]*Branch, int, error) {
brs, countAll, err := repo.GetBranchNames(skip, limit)

View File

@ -11,6 +11,7 @@ import (
"os"
"os/signal"
"runtime/pprof"
"strconv"
"sync"
"syscall"
"time"
@ -45,7 +46,7 @@ type Manager struct {
func newGracefulManager(ctx context.Context) *Manager {
manager := &Manager{
isChild: len(os.Getenv(listenFDs)) > 0 && os.Getppid() > 1,
isChild: len(os.Getenv(listenFDsEnv)) > 0 && os.Getppid() > 1,
lock: &sync.RWMutex{},
}
manager.createServerWaitGroup.Add(numberOfServersToCreate)
@ -53,6 +54,41 @@ func newGracefulManager(ctx context.Context) *Manager {
return manager
}
type systemdNotifyMsg string
const (
readyMsg systemdNotifyMsg = "READY=1"
stoppingMsg systemdNotifyMsg = "STOPPING=1"
reloadingMsg systemdNotifyMsg = "RELOADING=1"
watchdogMsg systemdNotifyMsg = "WATCHDOG=1"
)
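// statusMsg builds a STATUS= message for the systemd notify protocol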
func statusMsg(msg string) systemdNotifyMsg {
return systemdNotifyMsg("STATUS=" + msg)
}
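// pidMsg builds a MAINPID= message reporting the current process ID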
func pidMsg() systemdNotifyMsg {
return systemdNotifyMsg("MAINPID=" + strconv.Itoa(os.Getpid()))
}
// Notify systemd of status via the notify protocol
func (g *Manager) notify(msg systemdNotifyMsg) {
conn, err := getNotifySocket()
if err != nil {
// the err is logged in getNotifySocket
return
}
if conn == nil {
return
}
defer conn.Close()
if _, err = conn.Write([]byte(msg)); err != nil {
log.Warn("Failed to notify NOTIFY_SOCKET: %v", err)
return
}
}
func (g *Manager) start(ctx context.Context) {
// Make contexts
g.terminateCtx, g.terminateCtxCancel = context.WithCancel(ctx)
@ -72,6 +108,8 @@ func (g *Manager) start(ctx context.Context) {
// Set the running state & handle signals
g.setState(stateRunning)
g.notify(statusMsg("Starting Gitea"))
g.notify(pidMsg())
go g.handleSignals(g.managerCtx)
// Handle clean up of unused provided listeners and delayed start-up
@ -84,6 +122,7 @@ func (g *Manager) start(ctx context.Context) {
// Ignore the error here there's not much we can do with it
// They're logged in the CloseProvidedListeners function
_ = CloseProvidedListeners()
g.notify(readyMsg)
}()
if setting.StartupTimeout > 0 {
go func() {
@ -104,6 +143,8 @@ func (g *Manager) start(ctx context.Context) {
return
case <-time.After(setting.StartupTimeout):
log.Error("Startup took too long! Shutting down")
g.notify(statusMsg("Startup took too long! Shutting down"))
g.notify(stoppingMsg)
g.doShutdown()
}
}()
@ -126,6 +167,13 @@ func (g *Manager) handleSignals(ctx context.Context) {
syscall.SIGTSTP,
)
watchdogTimeout := getWatchdogTimeout()
t := &time.Ticker{}
if watchdogTimeout != 0 {
g.notify(watchdogMsg)
t = time.NewTicker(watchdogTimeout / 2)
}
pid := syscall.Getpid()
for {
select {
@ -136,6 +184,7 @@ func (g *Manager) handleSignals(ctx context.Context) {
g.DoGracefulRestart()
case syscall.SIGUSR1:
log.Warn("PID %d. Received SIGUSR1. Releasing and reopening logs", pid)
g.notify(statusMsg("Releasing and reopening logs"))
if err := log.ReleaseReopen(); err != nil {
log.Error("Error whilst releasing and reopening logs: %v", err)
}
@ -153,6 +202,8 @@ func (g *Manager) handleSignals(ctx context.Context) {
default:
log.Info("PID %d. Received %v.", pid, sig)
}
case <-t.C:
g.notify(watchdogMsg)
case <-ctx.Done():
log.Warn("PID: %d. Background context for manager closed - %v - Shutting down...", pid, ctx.Err())
g.DoGracefulShutdown()
@ -169,6 +220,9 @@ func (g *Manager) doFork() error {
}
g.forked = true
g.lock.Unlock()
g.notify(reloadingMsg)
// We need to move the file logs to append pids
setting.RestartLogsWithPIDSuffix()
@ -191,18 +245,27 @@ func (g *Manager) DoGracefulRestart() {
}
} else {
log.Info("PID: %d. Not set restartable. Shutting down...", os.Getpid())
g.notify(stoppingMsg)
g.doShutdown()
}
}
// DoImmediateHammer causes an immediate hammer
func (g *Manager) DoImmediateHammer() {
g.notify(statusMsg("Sending immediate hammer"))
g.doHammerTime(0 * time.Second)
}
// DoGracefulShutdown causes a graceful shutdown
func (g *Manager) DoGracefulShutdown() {
g.lock.Lock()
if !g.forked {
g.lock.Unlock()
g.notify(stoppingMsg)
} else {
g.lock.Unlock()
g.notify(statusMsg("Shutting down after fork"))
}
g.doShutdown()
}

View File

@ -14,6 +14,7 @@ import (
"strconv"
"strings"
"sync"
"time"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
@ -21,9 +22,12 @@ import (
)
const (
listenFDs = "LISTEN_FDS"
startFD = 3
unlinkFDs = "GITEA_UNLINK_FDS"
listenFDsEnv = "LISTEN_FDS"
startFD = 3
unlinkFDsEnv = "GITEA_UNLINK_FDS"
notifySocketEnv = "NOTIFY_SOCKET"
watchdogTimeoutEnv = "WATCHDOG_USEC"
)
// In order to keep the working directory the same as when we started we record
@ -38,6 +42,9 @@ var (
activeListenersToUnlink = []bool{}
providedListeners = []net.Listener{}
activeListeners = []net.Listener{}
notifySocketAddr string
watchdogTimeout time.Duration
)
func getProvidedFDs() (savedErr error) {
@ -45,18 +52,52 @@ func getProvidedFDs() (savedErr error) {
once.Do(func() {
mutex.Lock()
defer mutex.Unlock()
// now handle some additional systemd-provided things
notifySocketAddr = os.Getenv(notifySocketEnv)
if notifySocketAddr != "" {
log.Debug("Systemd Notify Socket provided: %s", notifySocketAddr)
savedErr = os.Unsetenv(notifySocketEnv)
if savedErr != nil {
log.Warn("Unable to Unset the NOTIFY_SOCKET environment variable: %v", savedErr)
return
}
// FIXME: We don't handle WATCHDOG_PID
timeoutStr := os.Getenv(watchdogTimeoutEnv)
if timeoutStr != "" {
savedErr = os.Unsetenv(watchdogTimeoutEnv)
if savedErr != nil {
log.Warn("Unable to Unset the WATCHDOG_USEC environment variable: %v", savedErr)
return
}
s, err := strconv.ParseInt(timeoutStr, 10, 64)
if err != nil {
log.Error("Unable to parse the provided WATCHDOG_USEC: %v", err)
savedErr = fmt.Errorf("unable to parse the provided WATCHDOG_USEC: %w", err)
return
}
if s <= 0 {
log.Error("Unable to parse the provided WATCHDOG_USEC: %s should be a positive number", timeoutStr)
savedErr = fmt.Errorf("unable to parse the provided WATCHDOG_USEC: %s should be a positive number", timeoutStr)
return
}
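// per sd_watchdog_enabled(3), WATCHDOG_USEC is expressed in microseconds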
watchdogTimeout = time.Duration(s) * time.Microsecond
}
} else {
log.Trace("No Systemd Notify Socket provided")
}
numFDs := os.Getenv(listenFDs)
numFDs := os.Getenv(listenFDsEnv)
if numFDs == "" {
return
}
n, err := strconv.Atoi(numFDs)
if err != nil {
savedErr = fmt.Errorf("%s is not a number: %s. Err: %w", listenFDs, numFDs, err)
savedErr = fmt.Errorf("%s is not a number: %s. Err: %w", listenFDsEnv, numFDs, err)
return
}
fdsToUnlinkStr := strings.Split(os.Getenv(unlinkFDs), ",")
fdsToUnlinkStr := strings.Split(os.Getenv(unlinkFDsEnv), ",")
providedListenersToUnlink = make([]bool, n)
for _, fdStr := range fdsToUnlinkStr {
i, err := strconv.Atoi(fdStr)
@ -73,7 +114,7 @@ func getProvidedFDs() (savedErr error) {
if err == nil {
// Close the inherited file if it's a listener
if err = file.Close(); err != nil {
savedErr = fmt.Errorf("error closing provided socket fd %d: %s", i, err)
savedErr = fmt.Errorf("error closing provided socket fd %d: %w", i, err)
return
}
providedListeners = append(providedListeners, l)
@ -255,3 +296,36 @@ func getActiveListenersToUnlink() []bool {
copy(listenersToUnlink, activeListenersToUnlink)
return listenersToUnlink
}
func getNotifySocket() (*net.UnixConn, error) {
if err := getProvidedFDs(); err != nil {
// This error will be logged elsewhere
return nil, nil
}
if notifySocketAddr == "" {
return nil, nil
}
socketAddr := &net.UnixAddr{
Name: notifySocketAddr,
Net: "unixgram",
}
notifySocket, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
if err != nil {
log.Warn("failed to dial NOTIFY_SOCKET %s: %v", socketAddr, err)
return nil, err
}
return notifySocket, nil
}
func getWatchdogTimeout() time.Duration {
if err := getProvidedFDs(); err != nil {
// This error will be logged elsewhere
return 0
}
return watchdogTimeout
}
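For intuition, NOTIFY_SOCKET points at a datagram Unix socket owned by systemd. A minimal sketch of a fake receiver for local experiments — the socket path and program are illustrative, not part of this change:

package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	// pretend to be systemd: receive and print notify datagrams
	addr := &net.UnixAddr{Name: "/tmp/fake-notify.sock", Net: "unixgram"}
	conn, err := net.ListenUnixgram("unixgram", addr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	buf := make([]byte, 4096)
	for {
		n, _, err := conn.ReadFromUnix(buf)
		if err != nil {
			return
		}
		fmt.Printf("notify: %q\n", buf[:n]) // e.g. "READY=1" or "STATUS=Starting Gitea"
	}
}

Running Gitea with NOTIFY_SOCKET=/tmp/fake-notify.sock then shows each message as it is sent.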

View File

@ -16,6 +16,7 @@ import (
"strings"
"sync"
"syscall"
"time"
)
var killParent sync.Once
@ -70,11 +71,20 @@ func RestartProcess() (int, error) {
// Pass on the environment and replace the old count key with the new one.
var env []string
for _, v := range os.Environ() {
if !strings.HasPrefix(v, listenFDs+"=") {
if !strings.HasPrefix(v, listenFDsEnv+"=") {
env = append(env, v)
}
}
env = append(env, fmt.Sprintf("%s=%d", listenFDs, len(listeners)))
env = append(env, fmt.Sprintf("%s=%d", listenFDsEnv, len(listeners)))
if notifySocketAddr != "" {
env = append(env, fmt.Sprintf("%s=%s", notifySocketEnv, notifySocketAddr))
}
if watchdogTimeout != 0 {
watchdogStr := strconv.FormatInt(int64(watchdogTimeout/time.Microsecond), 10) // WATCHDOG_USEC is in microseconds, so convert back with time.Microsecond
env = append(env, fmt.Sprintf("%s=%s", watchdogTimeoutEnv, watchdogStr))
}
sb := &strings.Builder{}
for i, unlink := range getActiveListenersToUnlink() {
@ -87,7 +97,7 @@ func RestartProcess() (int, error) {
unlinkStr := sb.String()
if len(unlinkStr) > 0 {
unlinkStr = unlinkStr[:len(unlinkStr)-1]
env = append(env, fmt.Sprintf("%s=%s", unlinkFDs, unlinkStr))
env = append(env, fmt.Sprintf("%s=%s", unlinkFDsEnv, unlinkStr))
}
allFiles := append([]*os.File{os.Stdin, os.Stdout, os.Stderr}, files...)

View File

@ -4,10 +4,8 @@
package httpcache
import (
"encoding/base64"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"time"
@ -37,38 +35,9 @@ func SetCacheControlInHeader(h http.Header, maxAge time.Duration, additionalDire
h.Set("Cache-Control", strings.Join(append(directives, additionalDirectives...), ", "))
}
// generateETag generates an ETag based on size, filename and file modification time
func generateETag(fi os.FileInfo) string {
etag := fmt.Sprint(fi.Size()) + fi.Name() + fi.ModTime().UTC().Format(http.TimeFormat)
return `"` + base64.StdEncoding.EncodeToString([]byte(etag)) + `"`
}
// HandleTimeCache handles time-based caching for a HTTP request
func HandleTimeCache(req *http.Request, w http.ResponseWriter, fi os.FileInfo) (handled bool) {
return HandleGenericTimeCache(req, w, fi.ModTime())
}
// HandleGenericTimeCache handles time-based caching for a HTTP request
func HandleGenericTimeCache(req *http.Request, w http.ResponseWriter, lastModified time.Time) (handled bool) {
func ServeContentWithCacheControl(w http.ResponseWriter, req *http.Request, name string, modTime time.Time, content io.ReadSeeker) {
SetCacheControlInHeader(w.Header(), setting.StaticCacheTime)
ifModifiedSince := req.Header.Get("If-Modified-Since")
if ifModifiedSince != "" {
t, err := time.Parse(http.TimeFormat, ifModifiedSince)
if err == nil && lastModified.Unix() <= t.Unix() {
w.WriteHeader(http.StatusNotModified)
return true
}
}
w.Header().Set("Last-Modified", lastModified.Format(http.TimeFormat))
return false
}
// HandleFileETagCache handles ETag-based caching for a HTTP request
func HandleFileETagCache(req *http.Request, w http.ResponseWriter, fi os.FileInfo) (handled bool) {
etag := generateETag(fi)
return HandleGenericETagCache(req, w, etag)
http.ServeContent(w, req, name, modTime, content)
}
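A minimal usage sketch of the new helper, assuming net/http, os and code.gitea.io/gitea/modules/httpcache are imported — the handler name and path are illustrative:

func serveStatic(w http.ResponseWriter, req *http.Request) {
	f, err := os.Open("public/img/logo.svg") // illustrative path
	if err != nil {
		http.NotFound(w, req)
		return
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		http.Error(w, "stat failed", http.StatusInternalServerError)
		return
	}
	// http.ServeContent handles If-Modified-Since and Range requests; the wrapper adds Cache-Control
	httpcache.ServeContentWithCacheControl(w, req, fi.Name(), fi.ModTime(), f)
}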
// HandleGenericETagCache handles ETag-based caching for an HTTP request.

View File

@ -6,23 +6,12 @@ package httpcache
import (
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
type mockFileInfo struct{}
func (m mockFileInfo) Name() string { return "gitea.test" }
func (m mockFileInfo) Size() int64 { return int64(10) }
func (m mockFileInfo) Mode() os.FileMode { return os.ModePerm }
func (m mockFileInfo) ModTime() time.Time { return time.Time{} }
func (m mockFileInfo) IsDir() bool { return false }
func (m mockFileInfo) Sys() interface{} { return nil }
func countFormalHeaders(h http.Header) (c int) {
for k := range h {
// ignore our headers for internal usage
@ -34,52 +23,6 @@ func countFormalHeaders(h http.Header) (c int) {
return c
}
func TestHandleFileETagCache(t *testing.T) {
fi := mockFileInfo{}
etag := `"MTBnaXRlYS50ZXN0TW9uLCAwMSBKYW4gMDAwMSAwMDowMDowMCBHTVQ="`
t.Run("No_If-None-Match", func(t *testing.T) {
req := &http.Request{Header: make(http.Header)}
w := httptest.NewRecorder()
handled := HandleFileETagCache(req, w, fi)
assert.False(t, handled)
assert.Equal(t, 2, countFormalHeaders(w.Header()))
assert.Contains(t, w.Header(), "Cache-Control")
assert.Contains(t, w.Header(), "Etag")
assert.Equal(t, etag, w.Header().Get("Etag"))
})
t.Run("Wrong_If-None-Match", func(t *testing.T) {
req := &http.Request{Header: make(http.Header)}
w := httptest.NewRecorder()
req.Header.Set("If-None-Match", `"wrong etag"`)
handled := HandleFileETagCache(req, w, fi)
assert.False(t, handled)
assert.Equal(t, 2, countFormalHeaders(w.Header()))
assert.Contains(t, w.Header(), "Cache-Control")
assert.Contains(t, w.Header(), "Etag")
assert.Equal(t, etag, w.Header().Get("Etag"))
})
t.Run("Correct_If-None-Match", func(t *testing.T) {
req := &http.Request{Header: make(http.Header)}
w := httptest.NewRecorder()
req.Header.Set("If-None-Match", etag)
handled := HandleFileETagCache(req, w, fi)
assert.True(t, handled)
assert.Equal(t, 1, countFormalHeaders(w.Header()))
assert.Contains(t, w.Header(), "Etag")
assert.Equal(t, etag, w.Header().Get("Etag"))
assert.Equal(t, http.StatusNotModified, w.Code)
})
}
func TestHandleGenericETagCache(t *testing.T) {
etag := `"test"`

modules/httplib/serve.go
View File

@ -0,0 +1,225 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package httplib
import (
"bytes"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"path/filepath"
"strconv"
"strings"
"time"
charsetModule "code.gitea.io/gitea/modules/charset"
"code.gitea.io/gitea/modules/httpcache"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/typesniffer"
"code.gitea.io/gitea/modules/util"
)
type ServeHeaderOptions struct {
ContentType string // defaults to "application/octet-stream"
ContentTypeCharset string
ContentLength *int64
Disposition string // defaults to "attachment"
Filename string
CacheDuration time.Duration // defaults to 5 minutes
LastModified time.Time
}
// ServeSetHeaders sets necessary content serve headers
func ServeSetHeaders(w http.ResponseWriter, opts *ServeHeaderOptions) {
header := w.Header()
contentType := typesniffer.ApplicationOctetStream
if opts.ContentType != "" {
if opts.ContentTypeCharset != "" {
contentType = opts.ContentType + "; charset=" + strings.ToLower(opts.ContentTypeCharset)
} else {
contentType = opts.ContentType
}
}
header.Set("Content-Type", contentType)
header.Set("X-Content-Type-Options", "nosniff")
if opts.ContentLength != nil {
header.Set("Content-Length", strconv.FormatInt(*opts.ContentLength, 10))
}
if opts.Filename != "" {
disposition := opts.Disposition
if disposition == "" {
disposition = "attachment"
}
backslashEscapedName := strings.ReplaceAll(strings.ReplaceAll(opts.Filename, `\`, `\\`), `"`, `\"`) // \ -> \\, " -> \"
header.Set("Content-Disposition", fmt.Sprintf(`%s; filename="%s"; filename*=UTF-8''%s`, disposition, backslashEscapedName, url.PathEscape(opts.Filename)))
header.Set("Access-Control-Expose-Headers", "Content-Disposition")
}
duration := opts.CacheDuration
if duration == 0 {
duration = 5 * time.Minute
}
httpcache.SetCacheControlInHeader(header, duration)
if !opts.LastModified.IsZero() {
header.Set("Last-Modified", opts.LastModified.UTC().Format(http.TimeFormat))
}
}
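For example, a caller in the same package serving a download might fill the options like this — a sketch assuming net/http and time are imported; the content type, file name and durations are illustrative:

func serveBlob(w http.ResponseWriter, blob []byte, modTime time.Time) {
	size := int64(len(blob))
	ServeSetHeaders(w, &ServeHeaderOptions{
		ContentType:   "application/zip", // illustrative
		ContentLength: &size,
		Disposition:   "attachment",
		Filename:      "archive.zip", // illustrative
		CacheDuration: time.Hour,
		LastModified:  modTime,
	})
	_, _ = w.Write(blob)
}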
// setServeHeadersByFile sets the serve headers based on the file path and a pre-read buffer used for MIME sniffing
func setServeHeadersByFile(r *http.Request, w http.ResponseWriter, filePath string, mimeBuf []byte) {
// do not set "Content-Length", because the length could only be set by callers, and it needs to support range requests
opts := &ServeHeaderOptions{
Filename: path.Base(filePath),
}
sniffedType := typesniffer.DetectContentType(mimeBuf)
// the "render" parameter came from year 2016: 638dd24c, it doesn't have clear meaning, so I think it could be removed later
isPlain := sniffedType.IsText() || r.FormValue("render") != ""
if setting.MimeTypeMap.Enabled {
fileExtension := strings.ToLower(filepath.Ext(filePath))
opts.ContentType = setting.MimeTypeMap.Map[fileExtension]
}
if opts.ContentType == "" {
if sniffedType.IsBrowsableBinaryType() {
opts.ContentType = sniffedType.GetMimeType()
} else if isPlain {
opts.ContentType = "text/plain"
} else {
opts.ContentType = typesniffer.ApplicationOctetStream
}
}
if isPlain {
charset, err := charsetModule.DetectEncoding(mimeBuf)
if err != nil {
log.Error("Detect raw file %s charset failed: %v, using by default utf-8", filePath, err)
charset = "utf-8"
}
opts.ContentTypeCharset = strings.ToLower(charset)
}
isSVG := sniffedType.IsSvgImage()
// serve types that can present a security risk with CSP
if isSVG {
w.Header().Set("Content-Security-Policy", "default-src 'none'; style-src 'unsafe-inline'; sandbox")
} else if sniffedType.IsPDF() {
// no sandbox attribute for PDF as it breaks rendering in at least Safari. This
// should generally be safe because scripts inside a PDF cannot escape the PDF document;
// see https://bugs.chromium.org/p/chromium/issues/detail?id=413851 for more discussion
w.Header().Set("Content-Security-Policy", "default-src 'none'; style-src 'unsafe-inline'")
}
opts.Disposition = "inline"
if isSVG && !setting.UI.SVG.Enabled {
opts.Disposition = "attachment"
}
ServeSetHeaders(w, opts)
}
const mimeDetectionBufferLen = 1024
func ServeContentByReader(r *http.Request, w http.ResponseWriter, filePath string, size int64, reader io.Reader) {
buf := make([]byte, mimeDetectionBufferLen)
n, err := util.ReadAtMost(reader, buf)
if err != nil {
http.Error(w, "serve content: unable to pre-read", http.StatusRequestedRangeNotSatisfiable)
return
}
if n >= 0 {
buf = buf[:n]
}
setServeHeadersByFile(r, w, filePath, buf)
// reset the reader to the beginning
reader = io.MultiReader(bytes.NewReader(buf), reader)
rangeHeader := r.Header.Get("Range")
// if no size or no supported range, serve as 200 (complete response)
if size <= 0 || !strings.HasPrefix(rangeHeader, "bytes=") {
if size >= 0 {
w.Header().Set("Content-Length", strconv.FormatInt(size, 10))
}
_, _ = io.Copy(w, reader) // just like http.ServeContent, not necessary to handle the error
return
}
// do our best to support the minimal "Range" request (no support for multiple range: "Range: bytes=0-50, 100-150")
//
// GET /...
// Range: bytes=0-1023
//
// HTTP/1.1 206 Partial Content
// Content-Range: bytes 0-1023/146515
// Content-Length: 1024
_, rangeParts, _ := strings.Cut(rangeHeader, "=")
rangeBytesStart, rangeBytesEnd, found := strings.Cut(rangeParts, "-")
start, err := strconv.ParseInt(rangeBytesStart, 10, 64)
if start < 0 || start >= size {
err = errors.New("invalid start range")
}
if err != nil {
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
end, err := strconv.ParseInt(rangeBytesEnd, 10, 64)
if rangeBytesEnd == "" && found {
err = nil
end = size - 1
}
if end >= size {
end = size - 1
}
if end < start {
err = errors.New("invalid end range")
}
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
partialLength := end - start + 1
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, size))
w.Header().Set("Content-Length", strconv.FormatInt(partialLength, 10))
if _, err = io.CopyN(io.Discard, reader, start); err != nil {
http.Error(w, "serve content: unable to skip", http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusPartialContent)
_, _ = io.CopyN(w, reader, partialLength) // just like http.ServeContent, not necessary to handle the error
}
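To make the range arithmetic concrete: for a 16-byte body and "Range: bytes=1-3", start=1 and end=3, so partialLength = 3-1+1 = 3; the handler skips one byte of the reader, then answers 206 with "Content-Range: bytes 1-3/16" and "Content-Length: 3". An open-ended "bytes=1-" resolves end to size-1 = 15 and serves the remaining 15 bytes.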
func ServeContentByReadSeeker(r *http.Request, w http.ResponseWriter, filePath string, modTime time.Time, reader io.ReadSeeker) {
buf := make([]byte, mimeDetectionBufferLen)
n, err := util.ReadAtMost(reader, buf)
if err != nil {
http.Error(w, "serve content: unable to read", http.StatusInternalServerError)
return
}
if _, err = reader.Seek(0, io.SeekStart); err != nil {
http.Error(w, "serve content: unable to seek", http.StatusInternalServerError)
return
}
if n >= 0 {
buf = buf[:n]
}
setServeHeadersByFile(r, w, filePath, buf)
http.ServeContent(w, r, path.Base(filePath), modTime, reader)
}

View File

@ -0,0 +1,110 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package httplib
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestServeContentByReader(t *testing.T) {
data := "0123456789abcdef"
test := func(t *testing.T, expectedStatusCode int, expectedContent string) {
_, rangeStr, _ := strings.Cut(t.Name(), "_range_")
r := &http.Request{Header: http.Header{}, Form: url.Values{}}
if rangeStr != "" {
r.Header.Set("Range", fmt.Sprintf("bytes=%s", rangeStr))
}
reader := strings.NewReader(data)
w := httptest.NewRecorder()
ServeContentByReader(r, w, "test", int64(len(data)), reader)
assert.Equal(t, expectedStatusCode, w.Code)
if expectedStatusCode == http.StatusPartialContent || expectedStatusCode == http.StatusOK {
assert.Equal(t, fmt.Sprint(len(expectedContent)), w.Header().Get("Content-Length"))
assert.Equal(t, expectedContent, w.Body.String())
}
}
t.Run("_range_", func(t *testing.T) {
test(t, http.StatusOK, data)
})
t.Run("_range_0-", func(t *testing.T) {
test(t, http.StatusPartialContent, data)
})
t.Run("_range_0-15", func(t *testing.T) {
test(t, http.StatusPartialContent, data)
})
t.Run("_range_1-", func(t *testing.T) {
test(t, http.StatusPartialContent, data[1:])
})
t.Run("_range_1-3", func(t *testing.T) {
test(t, http.StatusPartialContent, data[1:3+1])
})
t.Run("_range_16-", func(t *testing.T) {
test(t, http.StatusRequestedRangeNotSatisfiable, "")
})
t.Run("_range_1-99999", func(t *testing.T) {
test(t, http.StatusPartialContent, data[1:])
})
}
func TestServeContentByReadSeeker(t *testing.T) {
data := "0123456789abcdef"
tmpFile := t.TempDir() + "/test"
err := os.WriteFile(tmpFile, []byte(data), 0o644)
assert.NoError(t, err)
test := func(t *testing.T, expectedStatusCode int, expectedContent string) {
_, rangeStr, _ := strings.Cut(t.Name(), "_range_")
r := &http.Request{Header: http.Header{}, Form: url.Values{}}
if rangeStr != "" {
r.Header.Set("Range", fmt.Sprintf("bytes=%s", rangeStr))
}
seekReader, err := os.OpenFile(tmpFile, os.O_RDONLY, 0o644)
if !assert.NoError(t, err) {
return
}
defer seekReader.Close()
w := httptest.NewRecorder()
ServeContentByReadSeeker(r, w, "test", time.Time{}, seekReader)
assert.Equal(t, expectedStatusCode, w.Code)
if expectedStatusCode == http.StatusPartialContent || expectedStatusCode == http.StatusOK {
assert.Equal(t, fmt.Sprint(len(expectedContent)), w.Header().Get("Content-Length"))
assert.Equal(t, expectedContent, w.Body.String())
}
}
t.Run("_range_", func(t *testing.T) {
test(t, http.StatusOK, data)
})
t.Run("_range_0-", func(t *testing.T) {
test(t, http.StatusPartialContent, data)
})
t.Run("_range_0-15", func(t *testing.T) {
test(t, http.StatusPartialContent, data)
})
t.Run("_range_1-", func(t *testing.T) {
test(t, http.StatusPartialContent, data[1:])
})
t.Run("_range_1-3", func(t *testing.T) {
test(t, http.StatusPartialContent, data[1:3+1])
})
t.Run("_range_16-", func(t *testing.T) {
test(t, http.StatusRequestedRangeNotSatisfiable, "")
})
t.Run("_range_1-99999", func(t *testing.T) {
test(t, http.StatusPartialContent, data[1:])
})
}

View File

@ -26,7 +26,7 @@ func TestMain(m *testing.M) {
func TestBleveSearchIssues(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
setting.CfgProvider = setting.NewEmptyConfigProvider()
setting.CfgProvider, _ = setting.NewConfigProviderFromData("")
tmpIndexerDir := t.TempDir()

View File

@ -28,7 +28,7 @@ func TestMain(m *testing.M) {
func TestRepoStatsIndex(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
setting.CfgProvider = setting.NewEmptyConfigProvider()
setting.CfgProvider, _ = setting.NewConfigProviderFromData("")
setting.LoadQueueSettings()

View File

@ -18,9 +18,9 @@ import (
var (
// ErrHashMismatch occurs if the content hash does not match the OID
ErrHashMismatch = errors.New("Content hash does not match OID")
ErrHashMismatch = errors.New("content hash does not match OID")
// ErrSizeMismatch occurs if the content size does not match
ErrSizeMismatch = errors.New("Content size does not match")
ErrSizeMismatch = errors.New("content size does not match")
)
// ContentStore provides a simple file system based storage.
@ -105,7 +105,7 @@ func (s *ContentStore) Verify(pointer Pointer) (bool, error) {
}
// ReadMetaObject will read a git_model.LFSMetaObject and return a reader
func ReadMetaObject(pointer Pointer) (io.ReadCloser, error) {
func ReadMetaObject(pointer Pointer) (io.ReadSeekCloser, error) {
contentStore := NewContentStore()
return contentStore.Get(pointer)
}

View File

@ -0,0 +1,236 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package alpine
import (
"archive/tar"
"bufio"
"compress/gzip"
"crypto/sha1"
"encoding/base64"
"io"
"strconv"
"strings"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/validation"
)
var (
ErrMissingPKGINFOFile = util.NewInvalidArgumentErrorf("PKGINFO file is missing")
ErrInvalidName = util.NewInvalidArgumentErrorf("package name is invalid")
ErrInvalidVersion = util.NewInvalidArgumentErrorf("package version is invalid")
)
const (
PropertyMetadata = "alpine.metadata"
PropertyBranch = "alpine.branch"
PropertyRepository = "alpine.repository"
PropertyArchitecture = "alpine.architecture"
SettingKeyPrivate = "alpine.key.private"
SettingKeyPublic = "alpine.key.public"
RepositoryPackage = "_alpine"
RepositoryVersion = "_repository"
)
// https://wiki.alpinelinux.org/wiki/Apk_spec
// Package represents an Alpine package
type Package struct {
Name string
Version string
VersionMetadata VersionMetadata
FileMetadata FileMetadata
}
// VersionMetadata represents the version metadata of an Alpine package
type VersionMetadata struct {
Description string `json:"description,omitempty"`
License string `json:"license,omitempty"`
ProjectURL string `json:"project_url,omitempty"`
Maintainer string `json:"maintainer,omitempty"`
}
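// FileMetadata represents the file metadata of an Alpine package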
type FileMetadata struct {
Checksum string `json:"checksum"`
Packager string `json:"packager,omitempty"`
BuildDate int64 `json:"build_date,omitempty"`
Size int64 `json:"size,omitempty"`
Architecture string `json:"architecture,omitempty"`
Origin string `json:"origin,omitempty"`
CommitHash string `json:"commit_hash,omitempty"`
InstallIf string `json:"install_if,omitempty"`
Provides []string `json:"provides,omitempty"`
Dependencies []string `json:"dependencies,omitempty"`
}
// ParsePackage parses the Alpine package file
func ParsePackage(r io.Reader) (*Package, error) {
// Alpine packages are concatenated .tar.gz streams. Usually the first stream contains the package metadata.
br := bufio.NewReader(r) // needed for gzip Multistream
h := sha1.New()
gzr, err := gzip.NewReader(&teeByteReader{br, h})
if err != nil {
return nil, err
}
defer gzr.Close()
for {
gzr.Multistream(false)
tr := tar.NewReader(gzr)
for {
hd, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if hd.Name == ".PKGINFO" {
p, err := ParsePackageInfo(tr)
if err != nil {
return nil, err
}
// drain the reader
for {
if _, err := tr.Next(); err != nil {
break
}
}
p.FileMetadata.Checksum = "Q1" + base64.StdEncoding.EncodeToString(h.Sum(nil))
return p, nil
}
}
h = sha1.New()
err = gzr.Reset(&teeByteReader{br, h})
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
}
return nil, ErrMissingPKGINFOFile
}
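A minimal usage sketch, assuming the package lives at its usual modules/packages import path — the .apk file name is illustrative:

package main

import (
	"fmt"
	"log"
	"os"

	"code.gitea.io/gitea/modules/packages/alpine"
)

func main() {
	f, err := os.Open("gitea-1.0.1-r0.apk") // illustrative path to an .apk file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	p, err := alpine.ParsePackage(f)
	if err != nil {
		log.Fatal(err)
	}
	// the checksum is the apk-tools "Q1<base64 SHA-1>" form computed over the metadata stream
	fmt.Println(p.Name, p.Version, p.FileMetadata.Checksum)
}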
// ParsePackageInfo parses a PKGINFO file to retrieve the metadata of an Alpine package
func ParsePackageInfo(r io.Reader) (*Package, error) {
p := &Package{}
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, "#") {
continue
}
i := strings.IndexRune(line, '=')
if i == -1 {
continue
}
key := strings.TrimSpace(line[:i])
value := strings.TrimSpace(line[i+1:])
switch key {
case "pkgname":
p.Name = value
case "pkgver":
p.Version = value
case "pkgdesc":
p.VersionMetadata.Description = value
case "url":
p.VersionMetadata.ProjectURL = value
case "builddate":
n, err := strconv.ParseInt(value, 10, 64)
if err == nil {
p.FileMetadata.BuildDate = n
}
case "size":
n, err := strconv.ParseInt(value, 10, 64)
if err == nil {
p.FileMetadata.Size = n
}
case "arch":
p.FileMetadata.Architecture = value
case "origin":
p.FileMetadata.Origin = value
case "commit":
p.FileMetadata.CommitHash = value
case "maintainer":
p.VersionMetadata.Maintainer = value
case "packager":
p.FileMetadata.Packager = value
case "license":
p.VersionMetadata.License = value
case "install_if":
p.FileMetadata.InstallIf = value
case "provides":
if value != "" {
p.FileMetadata.Provides = append(p.FileMetadata.Provides, value)
}
case "depend":
if value != "" {
p.FileMetadata.Dependencies = append(p.FileMetadata.Dependencies, value)
}
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
if p.Name == "" {
return nil, ErrInvalidName
}
if p.Version == "" {
return nil, ErrInvalidVersion
}
if !validation.IsValidURL(p.VersionMetadata.ProjectURL) {
p.VersionMetadata.ProjectURL = ""
}
return p, nil
}
// teeByteReader is like io.TeeReader but also implements io.ByteReader, so that
// gzip.NewReader uses it directly instead of wrapping it in a bufio.Reader, which
// would read ahead past the end of the current stream and corrupt the per-stream checksum
type teeByteReader struct {
r *bufio.Reader
w io.Writer
}
func (t *teeByteReader) Read(p []byte) (int, error) {
n, err := t.r.Read(p)
if n > 0 {
if n, err := t.w.Write(p[:n]); err != nil {
return n, err
}
}
return n, err
}
func (t *teeByteReader) ReadByte() (byte, error) {
b, err := t.r.ReadByte()
if err == nil {
if _, err := t.w.Write([]byte{b}); err != nil {
return 0, err
}
}
return b, err
}

View File

@ -0,0 +1,143 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package alpine
import (
"archive/tar"
"bytes"
"compress/gzip"
"io"
"testing"
"github.com/stretchr/testify/assert"
)
const (
packageName = "gitea"
packageVersion = "1.0.1"
packageDescription = "Package Description"
packageProjectURL = "https://gitea.io"
packageMaintainer = "KN4CK3R <dummy@gitea.io>"
)
func createPKGINFOContent(name, version string) []byte {
return []byte(`pkgname = ` + name + `
pkgver = ` + version + `
pkgdesc = ` + packageDescription + `
url = ` + packageProjectURL + `
# comment
builddate = 1678834800
packager = Gitea <pack@ag.er>
size = 123456
arch = aarch64
origin = origin
commit = 1111e709613fbc979651b09ac2bc27c6591a9999
maintainer = ` + packageMaintainer + `
license = MIT
depend = common
install_if = value
depend = gitea
provides = common
provides = gitea`)
}
func TestParsePackage(t *testing.T) {
createPackage := func(name string, content []byte) io.Reader {
names := []string{"first.stream", name}
contents := [][]byte{{0}, content}
var buf bytes.Buffer
zw := gzip.NewWriter(&buf)
for i := range names {
if i != 0 {
zw.Close()
zw.Reset(&buf)
}
tw := tar.NewWriter(zw)
hdr := &tar.Header{
Name: names[i],
Mode: 0o600,
Size: int64(len(contents[i])),
}
tw.WriteHeader(hdr)
tw.Write(contents[i])
tw.Close()
}
zw.Close()
return &buf
}
t.Run("MissingPKGINFOFile", func(t *testing.T) {
data := createPackage("dummy.txt", []byte{})
pp, err := ParsePackage(data)
assert.Nil(t, pp)
assert.ErrorIs(t, err, ErrMissingPKGINFOFile)
})
t.Run("InvalidPKGINFOFile", func(t *testing.T) {
data := createPackage(".PKGINFO", []byte{})
pp, err := ParsePackage(data)
assert.Nil(t, pp)
assert.ErrorIs(t, err, ErrInvalidName)
})
t.Run("Valid", func(t *testing.T) {
data := createPackage(".PKGINFO", createPKGINFOContent(packageName, packageVersion))
p, err := ParsePackage(data)
assert.NoError(t, err)
assert.NotNil(t, p)
assert.Equal(t, "Q1SRYURM5+uQDqfHSwTnNIOIuuDVQ=", p.FileMetadata.Checksum)
})
}
func TestParsePackageInfo(t *testing.T) {
t.Run("InvalidName", func(t *testing.T) {
data := createPKGINFOContent("", packageVersion)
p, err := ParsePackageInfo(bytes.NewReader(data))
assert.Nil(t, p)
assert.ErrorIs(t, err, ErrInvalidName)
})
t.Run("InvalidVersion", func(t *testing.T) {
data := createPKGINFOContent(packageName, "")
p, err := ParsePackageInfo(bytes.NewReader(data))
assert.Nil(t, p)
assert.ErrorIs(t, err, ErrInvalidVersion)
})
t.Run("Valid", func(t *testing.T) {
data := createPKGINFOContent(packageName, packageVersion)
p, err := ParsePackageInfo(bytes.NewReader(data))
assert.NoError(t, err)
assert.NotNil(t, p)
assert.Equal(t, packageName, p.Name)
assert.Equal(t, packageVersion, p.Version)
assert.Equal(t, packageDescription, p.VersionMetadata.Description)
assert.Equal(t, packageMaintainer, p.VersionMetadata.Maintainer)
assert.Equal(t, packageProjectURL, p.VersionMetadata.ProjectURL)
assert.Equal(t, "MIT", p.VersionMetadata.License)
assert.Empty(t, p.FileMetadata.Checksum)
assert.Equal(t, "Gitea <pack@ag.er>", p.FileMetadata.Packager)
assert.EqualValues(t, 1678834800, p.FileMetadata.BuildDate)
assert.EqualValues(t, 123456, p.FileMetadata.Size)
assert.Equal(t, "aarch64", p.FileMetadata.Architecture)
assert.Equal(t, "origin", p.FileMetadata.Origin)
assert.Equal(t, "1111e709613fbc979651b09ac2bc27c6591a9999", p.FileMetadata.CommitHash)
assert.Equal(t, "value", p.FileMetadata.InstallIf)
assert.ElementsMatch(t, []string{"common", "gitea"}, p.FileMetadata.Provides)
assert.ElementsMatch(t, []string{"common", "gitea"}, p.FileMetadata.Dependencies)
})
}

View File

@ -0,0 +1,94 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package goproxy
import (
"archive/zip"
"fmt"
"io"
"path"
"strings"
"code.gitea.io/gitea/modules/util"
)
const (
PropertyGoMod = "go.mod"
maxGoModFileSize = 16 * 1024 * 1024 // https://go.dev/ref/mod#zip-path-size-constraints
)
var (
ErrInvalidStructure = util.NewInvalidArgumentErrorf("package has invalid structure")
ErrGoModFileTooLarge = util.NewInvalidArgumentErrorf("go.mod file is too large")
)
type Package struct {
Name string
Version string
GoMod string
}
// ParsePackage parses the Go package file
// https://go.dev/ref/mod#zip-files
func ParsePackage(r io.ReaderAt, size int64) (*Package, error) {
archive, err := zip.NewReader(r, size)
if err != nil {
return nil, err
}
var p *Package
for _, file := range archive.File {
nameAndVersion := path.Dir(file.Name)
parts := strings.SplitN(nameAndVersion, "@", 2)
if len(parts) != 2 {
continue
}
versionParts := strings.SplitN(parts[1], "/", 2)
if p == nil {
p = &Package{
Name: strings.TrimSuffix(nameAndVersion, "@"+parts[1]),
Version: versionParts[0],
}
}
if len(versionParts) > 1 {
// files are expected in the "root" folder
continue
}
if path.Base(file.Name) == "go.mod" {
if file.UncompressedSize64 > maxGoModFileSize {
return nil, ErrGoModFileTooLarge
}
f, err := archive.Open(file.Name)
if err != nil {
return nil, err
}
defer f.Close()
bytes, err := io.ReadAll(&io.LimitedReader{R: f, N: maxGoModFileSize})
if err != nil {
return nil, err
}
p.GoMod = string(bytes)
return p, nil
}
}
if p == nil {
return nil, ErrInvalidStructure
}
p.GoMod = fmt.Sprintf("module %s", p.Name)
return p, nil
}
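A minimal usage sketch, again assuming the usual modules/packages import path — the module zip name is illustrative:

package main

import (
	"fmt"
	"log"
	"os"

	"code.gitea.io/gitea/modules/packages/goproxy"
)

func main() {
	f, err := os.Open("gitea-v0.0.1.zip") // illustrative module zip
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}
	p, err := goproxy.ParsePackage(f, fi.Size())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.Name, p.Version)
	fmt.Println(p.GoMod)
}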

View File

@ -0,0 +1,75 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package goproxy
import (
"archive/zip"
"bytes"
"testing"
"github.com/stretchr/testify/assert"
)
const (
packageName = "gitea.com/go-gitea/gitea"
packageVersion = "v0.0.1"
)
func TestParsePackage(t *testing.T) {
createArchive := func(files map[string][]byte) *bytes.Reader {
var buf bytes.Buffer
zw := zip.NewWriter(&buf)
for name, content := range files {
w, _ := zw.Create(name)
w.Write(content)
}
zw.Close()
return bytes.NewReader(buf.Bytes())
}
t.Run("EmptyPackage", func(t *testing.T) {
data := createArchive(nil)
p, err := ParsePackage(data, int64(data.Len()))
assert.Nil(t, p)
assert.ErrorIs(t, err, ErrInvalidStructure)
})
t.Run("InvalidNameOrVersionStructure", func(t *testing.T) {
data := createArchive(map[string][]byte{
packageName + "/" + packageVersion + "/go.mod": {},
})
p, err := ParsePackage(data, int64(data.Len()))
assert.Nil(t, p)
assert.ErrorIs(t, err, ErrInvalidStructure)
})
t.Run("GoModFileInWrongDirectory", func(t *testing.T) {
data := createArchive(map[string][]byte{
packageName + "@" + packageVersion + "/subdir/go.mod": {},
})
p, err := ParsePackage(data, int64(data.Len()))
assert.NotNil(t, p)
assert.NoError(t, err)
assert.Equal(t, packageName, p.Name)
assert.Equal(t, packageVersion, p.Version)
assert.Equal(t, "module gitea.com/go-gitea/gitea", p.GoMod)
})
t.Run("Valid", func(t *testing.T) {
data := createArchive(map[string][]byte{
packageName + "@" + packageVersion + "/subdir/go.mod": []byte("invalid"),
packageName + "@" + packageVersion + "/go.mod": []byte("valid"),
})
p, err := ParsePackage(data, int64(data.Len()))
assert.NotNil(t, p)
assert.NoError(t, err)
assert.Equal(t, packageName, p.Name)
assert.Equal(t, packageVersion, p.Version)
assert.Equal(t, "valid", p.GoMod)
})
}

View File

@ -97,10 +97,6 @@ func handleRequest(w http.ResponseWriter, req *http.Request, fs http.FileSystem,
return true
}
if httpcache.HandleFileETagCache(req, w, fi) {
return true
}
serveContent(w, req, fi, fi.ModTime(), f)
return true
}
@ -124,11 +120,11 @@ func serveContent(w http.ResponseWriter, req *http.Request, fi os.FileInfo, modt
w.Header().Set("Content-Type", "application/octet-stream")
}
w.Header().Set("Content-Encoding", "gzip")
http.ServeContent(w, req, fi.Name(), modtime, rdGzip)
httpcache.ServeContentWithCacheControl(w, req, fi.Name(), modtime, rdGzip)
return
}
}
http.ServeContent(w, req, fi.Name(), modtime, content)
httpcache.ServeContentWithCacheControl(w, req, fi.Name(), modtime, content)
return
}

View File

@ -87,7 +87,9 @@ func (q *baseChannel) PopItem(ctx context.Context) ([]byte, error) {
func (q *baseChannel) HasItem(ctx context.Context, data []byte) (bool, error) {
q.mu.Lock()
defer q.mu.Unlock()
if !q.isUnique {
return false, nil
}
return q.set.Contains(string(data)), nil
}
@ -107,7 +109,9 @@ func (q *baseChannel) Close() error {
defer q.mu.Unlock()
close(q.c)
q.set = container.Set[string]{}
if q.isUnique {
q.set = container.Set[string]{}
}
return nil
}
@ -119,5 +123,9 @@ func (q *baseChannel) RemoveAll(ctx context.Context) error {
for q.c != nil && len(q.c) > 0 {
<-q.c
}
if q.isUnique {
q.set = container.Set[string]{}
}
return nil
}

View File

@ -77,6 +77,14 @@ func (q *baseLevelQueueUnique) RemoveAll(ctx context.Context) error {
}
lq := (*levelUniqueQueue)(unsafe.Pointer(q.internal))
for lq.q.Len() > 0 {
if _, err := lq.q.LPop(); err != nil {
return err
}
}
// the "set" must be cleared after the "list" because there is no transaction.
// it's better to have duplicate items than to lose items.
members, err := lq.set.Members()
if err != nil {
return err // seriously corrupted
@ -84,10 +92,5 @@ func (q *baseLevelQueueUnique) RemoveAll(ctx context.Context) error {
for _, v := range members {
_, _ = lq.set.Remove(v)
}
for lq.q.Len() > 0 {
if _, err = lq.q.LPop(); err != nil {
return err
}
}
return nil
}

View File

@ -123,7 +123,10 @@ func (q *baseRedis) Close() error {
func (q *baseRedis) RemoveAll(ctx context.Context) error {
q.mu.Lock()
defer q.mu.Unlock()
c1 := q.client.Del(ctx, q.cfg.QueueFullName)
// the "set" must be cleared after the "list" because there is no transaction.
// it's better to have duplicate items than to lose items.
c2 := q.client.Del(ctx, q.cfg.SetFullName)
if c1.Err() != nil {
return c1.Err()
}
return c2.Err()

View File

@ -56,7 +56,7 @@ func TestBaseRedis(t *testing.T) {
}()
if !waitRedisReady("redis://127.0.0.1:6379/0", 0) {
redisServer = redisServerCmd(t)
if redisServer == nil && os.Getenv("CI") != "" {
if redisServer == nil && os.Getenv("CI") == "" {
t.Skip("redis-server not found")
return
}

View File

@ -33,6 +33,9 @@ type ManagedWorkerPoolQueue interface {
// FlushWithContext tries to make the handler process all items in the queue synchronously.
// It is for testing purpose only. It's not designed to be used in a cluster.
FlushWithContext(ctx context.Context, timeout time.Duration) error
// RemoveAllItems removes all items in the base queue (on-the-fly items are not affected)
RemoveAllItems(ctx context.Context) error
}
var manager *Manager

View File

@ -130,6 +130,11 @@ func (q *WorkerPoolQueue[T]) FlushWithContext(ctx context.Context, timeout time.
}
}
// RemoveAllItems removes all items in the base queue
func (q *WorkerPoolQueue[T]) RemoveAllItems(ctx context.Context) error {
return q.baseQueue.RemoveAll(ctx)
}
func (q *WorkerPoolQueue[T]) marshal(data T) []byte {
bs, err := json.Marshal(data)
if err != nil {

View File

@ -6,6 +6,7 @@ package repository
import (
"crypto/md5"
"fmt"
"strconv"
"testing"
"time"
@ -136,13 +137,11 @@ func TestPushCommits_AvatarLink(t *testing.T) {
enableGravatar(t)
assert.Equal(t,
"https://secure.gravatar.com/avatar/ab53a2911ddf9b4817ac01ddcd3d975f?d=identicon&s=84",
"https://secure.gravatar.com/avatar/ab53a2911ddf9b4817ac01ddcd3d975f?d=identicon&s="+strconv.Itoa(28*setting.Avatar.RenderedSizeFactor),
pushCommits.AvatarLink(db.DefaultContext, "user2@example.com"))
assert.Equal(t,
"https://secure.gravatar.com/avatar/"+
fmt.Sprintf("%x", md5.Sum([]byte("nonexistent@example.com")))+
"?d=identicon&s=84",
fmt.Sprintf("https://secure.gravatar.com/avatar/%x?d=identicon&s=%d", md5.Sum([]byte("nonexistent@example.com")), 28*setting.Avatar.RenderedSizeFactor),
pushCommits.AvatarLink(db.DefaultContext, "nonexistent@example.com"))
}

View File

@ -30,7 +30,6 @@ type ConfigProvider interface {
Section(section string) ConfigSection
NewSection(name string) (ConfigSection, error)
GetSection(name string) (ConfigSection, error)
DeleteSection(name string) error
Save() error
}
@ -40,12 +39,6 @@ type iniFileConfigProvider struct {
newFile bool // whether the file has not existed previously
}
// NewEmptyConfigProvider create a new empty config provider
func NewEmptyConfigProvider() ConfigProvider {
cp, _ := NewConfigProviderFromData("")
return cp
}
// NewConfigProviderFromData this function is only for testing
func NewConfigProviderFromData(configContent string) (ConfigProvider, error) {
var cfg *ini.File
@ -121,11 +114,6 @@ func (p *iniFileConfigProvider) GetSection(name string) (ConfigSection, error) {
return p.File.GetSection(name)
}
func (p *iniFileConfigProvider) DeleteSection(name string) error {
p.File.DeleteSection(name)
return nil
}
// Save save the content into file
func (p *iniFileConfigProvider) Save() error {
if p.opts.CustomConf == "" {

View File

@ -45,7 +45,7 @@ func loadLFSFrom(rootCfg ConfigProvider) {
LFS.LocksPagingNum = 50
}
LFS.HTTPAuthExpiry = sec.Key("LFS_HTTP_AUTH_EXPIRY").MustDuration(20 * time.Minute)
LFS.HTTPAuthExpiry = sec.Key("LFS_HTTP_AUTH_EXPIRY").MustDuration(24 * time.Hour)
if LFS.StartServer {
LFS.JWTSecretBytes = make([]byte, 32)

View File

@ -10,7 +10,6 @@ import (
)
func Test_loadMailerFrom(t *testing.T) {
iniFile := NewEmptyConfigProvider()
kases := map[string]*Mailer{
"smtp.mydomain.com": {
SMTPAddr: "smtp.mydomain.com",
@ -27,13 +26,13 @@ func Test_loadMailerFrom(t *testing.T) {
}
for host, kase := range kases {
t.Run(host, func(t *testing.T) {
iniFile.DeleteSection("mailer")
sec := iniFile.Section("mailer")
cfg, _ := NewConfigProviderFromData("")
sec := cfg.Section("mailer")
sec.NewKey("ENABLED", "true")
sec.NewKey("HOST", host)
// Check mailer setting
loadMailerFrom(iniFile)
loadMailerFrom(cfg)
assert.EqualValues(t, kase.SMTPAddr, MailService.SMTPAddr)
assert.EqualValues(t, kase.SMTPPort, MailService.SMTPPort)

View File

@ -24,6 +24,7 @@ var (
LimitTotalOwnerCount int64
LimitTotalOwnerSize int64
LimitSizeAlpine int64
LimitSizeCargo int64
LimitSizeChef int64
LimitSizeComposer int64
@ -32,6 +33,7 @@ var (
LimitSizeContainer int64
LimitSizeDebian int64
LimitSizeGeneric int64
LimitSizeGo int64
LimitSizeHelm int64
LimitSizeMaven int64
LimitSizeNpm int64
@ -69,6 +71,7 @@ func loadPackagesFrom(rootCfg ConfigProvider) {
}
Packages.LimitTotalOwnerSize = mustBytes(sec, "LIMIT_TOTAL_OWNER_SIZE")
Packages.LimitSizeAlpine = mustBytes(sec, "LIMIT_SIZE_ALPINE")
Packages.LimitSizeCargo = mustBytes(sec, "LIMIT_SIZE_CARGO")
Packages.LimitSizeChef = mustBytes(sec, "LIMIT_SIZE_CHEF")
Packages.LimitSizeComposer = mustBytes(sec, "LIMIT_SIZE_COMPOSER")
@ -77,6 +80,7 @@ func loadPackagesFrom(rootCfg ConfigProvider) {
Packages.LimitSizeContainer = mustBytes(sec, "LIMIT_SIZE_CONTAINER")
Packages.LimitSizeDebian = mustBytes(sec, "LIMIT_SIZE_DEBIAN")
Packages.LimitSizeGeneric = mustBytes(sec, "LIMIT_SIZE_GENERIC")
Packages.LimitSizeGo = mustBytes(sec, "LIMIT_SIZE_GO")
Packages.LimitSizeHelm = mustBytes(sec, "LIMIT_SIZE_HELM")
Packages.LimitSizeMaven = mustBytes(sec, "LIMIT_SIZE_MAVEN")
Packages.LimitSizeNpm = mustBytes(sec, "LIMIT_SIZE_NPM")

View File

@ -7,12 +7,13 @@ import (
"testing"
"github.com/stretchr/testify/assert"
ini "gopkg.in/ini.v1"
)
func TestMustBytes(t *testing.T) {
test := func(value string) int64 {
sec, _ := ini.Empty().NewSection("test")
cfg, err := NewConfigProviderFromData("[test]")
assert.NoError(t, err)
sec := cfg.Section("test")
sec.NewKey("VALUE", value)
return mustBytes(sec, "VALUE")

Some files were not shown because too many files have changed in this diff