1e4be0945b
This helps #31813 but does not replace it, since this PR only introduces the new module and leaves some work undone:

- New option in settings. `#31813` has done it.
- Use the locks in business logic. `#31813` has done it.

So I think the most efficient way is to merge this PR first (if it's acceptable) and then finish #31813.

## Design principles

### Use a spinlock even in the memory implementation

In actual use cases, users may cancel requests. `sync.Mutex` blocks the goroutine until the lock is acquired, even if the request has been canceled. A spinlock is more suitable for this scenario, since it makes it possible to give up the acquisition. Although a spinlock consumes more CPU resources, I think that is acceptable in most cases.
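The memory implementation itself is not shown on this page; as a minimal sketch of the idea (with hypothetical names, not the actual code), an acquisition loop that can give up on cancellation might look like this:

```go
package memlock // hypothetical package, for illustration only

import (
	"context"
	"sync/atomic"
	"time"
)

// lockWithSpin is a hypothetical helper: it keeps trying to flip an atomic
// flag and gives up as soon as the caller's context is canceled, unlike
// sync.Mutex.Lock, which would block regardless of cancellation.
func lockWithSpin(ctx context.Context, locked *atomic.Bool) error {
	for {
		if locked.CompareAndSwap(false, true) {
			return nil // acquired
		}
		select {
		case <-ctx.Done():
			// the request was canceled: stop spinning instead of waiting forever
			return ctx.Err()
		case <-time.After(10 * time.Millisecond):
			// brief backoff before the next attempt
		}
	}
}
```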
### Do not expose the mutex to callers

If we exposed the mutex to callers, they could reuse it, which adds complexity. For example:

```go
lock := GetLocker(key)
lock.Lock()
// ...
// even if the lock is unlocked, we cannot GC the lock,
// since the caller may still use it again.
lock.Unlock()
lock.Lock()
// ...
lock.Unlock()

// callers have to GC the lock manually.
RemoveLocker(key)
```

That's why https://github.com/go-gitea/gitea/pull/31813#discussion_r1721200549 was raised.

In this PR, we only expose `ReleaseFunc` to callers. Callers just need to call `ReleaseFunc` to release the lock, and do not need to care about the lock's lifecycle.

```go
_, release, err := locker.Lock(ctx, key)
if err != nil {
	return err
}
// ...
release()

// if callers want to lock again, they have to re-acquire the lock.
_, release, err := locker.Lock(ctx, key)
// ...
```

This also makes it much easier for the redis implementation to extend the mutex automatically, without callers having to care about the lock's lifecycle. See also https://github.com/go-gitea/gitea/pull/31813#discussion_r1722659743.

### Use "release" instead of "unlock"

"Unlock" means "unlock an acquired lock", so it is not acceptable to call "unlock" when the lock was never acquired, or to call it multiple times. That forces callers to decide whether or not to call "unlock", which adds complexity.

So we use "release" instead of "unlock" to make it clear: whether or not the lock was acquired, callers can always call "release", and it is also safe to call "release" multiple times.

However, the code does not expect callers to skip calling "release" after acquiring the lock; forgetting to call it causes a resource leak. That is exactly why "release" is always safe to call without extra checks: it leaves callers no reason to forget it.

### Acquired locks could be lost

Unlike `sync.Mutex`, which stays locked once acquired until `Unlock` is called, in the new module an acquired lock could be lost.

For example, a caller acquires the lock and holds it for a long time, relying on the redis implementation's auto-extension. If it loses the connection to the redis server, the lock can no longer be extended. If the caller does not stop what it is doing, another instance that can still reach the redis server could acquire the same lock and do the same work, which could cause data inconsistency.

So the caller should know when that happens. The solution is to return a new context which will be canceled if the lock is lost or released:

```go
ctx, release, err := locker.Lock(ctx, key)
if err != nil {
	return err
}
defer release()
// ...
DoSomething(ctx)

// the lock is lost now, so ctx has been canceled.

// Failed, since ctx has been canceled.
DoSomethingElse(ctx)
```

### Multiple ways to use the lock

1. Regular way

```go
ctx, release, err := Lock(ctx, key)
if err != nil {
	return err
}
defer release()
// ...
```

2. Early release

```go
ctx, release, err := Lock(ctx, key)
if err != nil {
	return err
}
defer release()
// ...
// release the lock earlier and reset the context back
ctx = release()
// continue to do something else
// ...
```

3. Functional way

```go
if err := LockAndDo(ctx, key, func(ctx context.Context) error {
	// ...
	return nil
}); err != nil {
	return err
}
```
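For orientation, the signatures used in the examples above and in the redis-based implementation below imply an interface roughly like the following. This is a sketch inferred from those signatures, not a verbatim copy of the module's definitions:

```go
package globallock

import "context"

// ReleaseFunc releases the lock (safe to call even if acquisition failed,
// and safe to call multiple times) and returns the original context that
// was passed to Lock/TryLock.
type ReleaseFunc func() context.Context

// Locker is the interface implemented by the memory-based and redis-based lockers.
type Locker interface {
	// Lock waits until the lock for key is acquired or ctx is canceled.
	// The returned context is canceled when the lock is lost or released.
	Lock(ctx context.Context, key string) (context.Context, ReleaseFunc, error)
	// TryLock attempts a single acquisition and reports whether it succeeded.
	TryLock(ctx context.Context, key string) (bool, context.Context, ReleaseFunc, error)
}
```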
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package globallock

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"code.gitea.io/gitea/modules/nosql"

	"github.com/go-redsync/redsync/v4"
	"github.com/go-redsync/redsync/v4/redis/goredis/v9"
)

const redisLockKeyPrefix = "gitea:globallock:"

// redisLockExpiry is the default expiry time for a lock.
// Define it as a variable to make it possible to change it in tests.
var redisLockExpiry = 30 * time.Second

type redisLocker struct {
	rs *redsync.Redsync

	mutexM   sync.Map
	closed   atomic.Bool
	extendWg sync.WaitGroup
}

var _ Locker = &redisLocker{}

func NewRedisLocker(connection string) Locker {
	l := &redisLocker{
		rs: redsync.New(
			goredis.NewPool(
				nosql.GetManager().GetRedisClient(connection),
			),
		),
	}

	l.extendWg.Add(1)
	l.startExtend()

	return l
}

func (l *redisLocker) Lock(ctx context.Context, key string) (context.Context, ReleaseFunc, error) {
	return l.lock(ctx, key, 0)
}

func (l *redisLocker) TryLock(ctx context.Context, key string) (bool, context.Context, ReleaseFunc, error) {
	ctx, f, err := l.lock(ctx, key, 1)

	var (
		errTaken     *redsync.ErrTaken
		errNodeTaken *redsync.ErrNodeTaken
	)
	if errors.As(err, &errTaken) || errors.As(err, &errNodeTaken) {
		return false, ctx, f, nil
	}
	return err == nil, ctx, f, err
}

// Close closes the locker.
// It will stop extending the locks and refuse to acquire new locks.
// In actual use, it is not necessary to call this function.
// But it's useful in tests to release resources.
// It could take some time since it waits for the extending goroutine to finish.
func (l *redisLocker) Close() error {
	l.closed.Store(true)
	l.extendWg.Wait()
	return nil
}

type redisMutex struct {
	mutex  *redsync.Mutex
	cancel context.CancelCauseFunc
}

func (l *redisLocker) lock(ctx context.Context, key string, tries int) (context.Context, ReleaseFunc, error) {
	if l.closed.Load() {
		return ctx, func() context.Context { return ctx }, fmt.Errorf("locker is closed")
	}

	originalCtx := ctx

	options := []redsync.Option{
		redsync.WithExpiry(redisLockExpiry),
	}
	if tries > 0 {
		options = append(options, redsync.WithTries(tries))
	}
	mutex := l.rs.NewMutex(redisLockKeyPrefix+key, options...)
	if err := mutex.LockContext(ctx); err != nil {
		return ctx, func() context.Context { return originalCtx }, err
	}

	ctx, cancel := context.WithCancelCause(ctx)

	l.mutexM.Store(key, &redisMutex{
		mutex:  mutex,
		cancel: cancel,
	})

	releaseOnce := sync.Once{}
	return ctx, func() context.Context {
		releaseOnce.Do(func() {
			l.mutexM.Delete(key)

			// It's safe to ignore the error here,
			// if it failed to unlock, it will be released automatically after the lock expires.
			// Do not call mutex.UnlockContext(ctx) here, or it will fail to release when ctx has timed out.
			_, _ = mutex.Unlock()

			cancel(ErrLockReleased)
		})
		return originalCtx
	}, nil
}

func (l *redisLocker) startExtend() {
	if l.closed.Load() {
		l.extendWg.Done()
		return
	}

	toExtend := make([]*redisMutex, 0)
	l.mutexM.Range(func(_, value any) bool {
		m := value.(*redisMutex)

		// Extend the lock if it is not expired.
		// Although the mutex will be removed from the map before it is released,
		// it still can be expired because of a failed extension.
		// If it happens, the cancel function should have been called,
		// so it does not need to be extended anymore.
		if time.Now().After(m.mutex.Until()) {
			return true
		}

		toExtend = append(toExtend, m)
		return true
	})
	for _, v := range toExtend {
		if ok, err := v.mutex.Extend(); !ok {
			v.cancel(err)
		}
	}

	time.AfterFunc(redisLockExpiry/2, l.startExtend)
}
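As a minimal usage sketch of the redis locker above (not from the repository): the connection string, lock key, and `doWork` helper below are placeholders, and in Gitea the locker would normally be wired up from configuration rather than hard-coded.

```go
package main

import (
	"context"
	"log"

	"code.gitea.io/gitea/modules/globallock"
)

func main() {
	// Placeholder connection string; NewRedisLocker hands it to
	// nosql.GetManager().GetRedisClient to obtain a redis client.
	locker := globallock.NewRedisLocker("redis://127.0.0.1:6379/0")

	// Block until the lock is acquired or the context is canceled.
	ctx, release, err := locker.Lock(context.Background(), "example-key")
	if err != nil {
		log.Fatalf("failed to acquire lock: %v", err)
	}
	// Always release; it is safe even if the lock was already lost.
	defer release()

	// Do the protected work with ctx, which is canceled if the lock is lost.
	doWork(ctx)
}

// doWork is a placeholder for the caller's business logic.
func doWork(ctx context.Context) {
	select {
	case <-ctx.Done():
		log.Println("lock lost or released:", context.Cause(ctx))
	default:
		log.Println("holding the lock, doing work")
	}
}
```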