Merge branch 'master' into feat/approval-new

# Conflicts:
#	models/error.go
#	models/migrations/migrations.go
#	models/models.go
#	public/js/index.js
Jonas Franz 2018-05-19 18:17:01 +02:00
commit a8dc699e74
No known key found for this signature in database
GPG Key ID: 506AEEBE80BEDECD
251 changed files with 41347 additions and 145 deletions

View File

@ -288,7 +288,7 @@ RESET_PASSWD_CODE_LIVE_MINUTES = 180
REGISTER_EMAIL_CONFIRM = false
; Disallow registration, only allow admins to create accounts.
DISABLE_REGISTRATION = false
; Allow registration only using third-party services, it works only when DISABLE_REGISTRATION is false
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
; User must sign in to view anything.
REQUIRE_SIGNIN_VIEW = false
@ -570,6 +570,14 @@ MAX_RESPONSE_ITEMS = 50
LANGS = en-US,zh-CN,zh-HK,zh-TW,de-DE,fr-FR,nl-NL,lv-LV,ru-RU,ja-JP,es-ES,pt-BR,pl-PL,bg-BG,it-IT,fi-FI,tr-TR,cs-CZ,sr-SP,sv-SE,ko-KR
NAMES = English,简体中文,繁體中文(香港),繁體中文(台灣),Deutsch,français,Nederlands,latviešu,русский,日本語,español,português do Brasil,polski,български,italiano,suomi,Türkçe,čeština,српски,svenska,한국어
[U2F]
; Two Factor authentication with security keys
; https://developers.yubico.com/U2F/App_ID.html
APP_ID = %(PROTOCOL)s://%(DOMAIN)s:%(HTTP_PORT)s
; Comma-separated list of trusted facets
TRUSTED_FACETS = %(PROTOCOL)s://%(DOMAIN)s:%(HTTP_PORT)s
; Used for datetimepicker
[i18n.datelang]
en-US = en

View File

@ -272,6 +272,10 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`.
- `MAX_GIT_DIFF_FILES`: **100**: Max number of files shown in diff view.
- `GC_ARGS`: **\<empty\>**: Arguments for command `git gc`, e.g. `--aggressive --auto`.
## U2F (`U2F`)
- `APP_ID`: **`ROOT_URL`**: Declares the facet of the application. Requires HTTPS.
- `TRUSTED_FACETS`: List of additional facets which are trusted. This is not supported by all browsers.
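For example, an instance served at `https://git.example.com` (a placeholder URL, not from this change) would usually leave `APP_ID` at the `ROOT_URL` default and only set `TRUSTED_FACETS` when additional origins, such as `https://www.example.com`, should be allowed to use the same registered keys.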
## Markup (`markup`)
Gitea can support Markup using external tools. The example below will add a markup named `asciidoc`.

View File

@ -535,6 +535,15 @@ _Symbols used in table:_
<td></td>
<td></td>
</tr>
<tr>
<td>FIDO U2F (2FA)</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>Webhook support</td>
<td></td>

View File

@ -0,0 +1,60 @@
---
date: "2018-05-11T11:00:00+02:00"
title: "Usage: Setup fail2ban"
slug: "fail2ban-setup"
weight: 16
toc: true
draft: false
menu:
sidebar:
parent: "usage"
name: "Fail2ban setup"
weight: 16
identifier: "fail2ban-setup"
---
# Fail2ban setup to block users after failed login attempts
**Remember that fail2ban is powerful and can cause serious problems if configured incorrectly, so test this setup before relying on it to make sure you don't lock yourself out.**
Gitea returns HTTP 200 for failed logins, so the web server's access log will not help here. If logging is enabled in
`app.ini`, you can instead watch `log/gitea.log`, which records a failed authentication attempt like this:
```log
2018/04/26 18:15:54 [I] Failed authentication attempt for user from xxx.xxx.xxx.xxx
```
So we set our filter in `/etc/fail2ban/filter.d/gitea.conf`:
```ini
# gitea.conf
[Definition]
failregex = .*Failed authentication attempt for .* from <HOST>
ignoreregex =
```
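Before enabling the jail, you can sanity-check the filter against your real log, for example with `fail2ban-regex /home/git/gitea/log/gitea.log /etc/fail2ban/filter.d/gitea.conf`, which reports how many log lines the regex matched.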
And configure it in `/etc/fail2ban/jail.d/jail.local`:
```ini
[gitea]
enabled = true
port = http,https
filter = gitea
logpath = /home/git/gitea/log/gitea.log
maxretry = 10
findtime = 3600
bantime = 900
action = iptables-allports
```
Read up on fail2ban and adjust the values to your needs; this configuration bans an IP from all ports
for **15 minutes** after 10 failed authentication attempts within one hour.
If you run Gitea behind a reverse proxy such as Nginx (for example with Docker), add this to your
Nginx configuration so that client IPs are not logged as 127.0.0.1:
```
proxy_set_header X-Real-IP $remote_addr;
```
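If Gitea itself runs in a Docker container while fail2ban runs on the host, note that bans applied to the default `INPUT` chain may not affect traffic forwarded to the container; pointing the action at the forward chain, e.g. `action = iptables-allports[chain="FORWARD"]`, is one possible workaround (verify against your own firewall setup).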

View File

@ -1238,6 +1238,28 @@ func (err ErrExternalLoginUserNotExist) Error() string {
return fmt.Sprintf("external login user link does not exists [userID: %d, loginSourceID: %d]", err.UserID, err.LoginSourceID)
}
// ____ ________________________________ .__ __ __ .__
// | | \_____ \_ _____/\______ \ ____ ____ |__| _______/ |_____________ _/ |_|__| ____ ____
// | | // ____/| __) | _// __ \ / ___\| |/ ___/\ __\_ __ \__ \\ __\ |/ _ \ / \
// | | // \| \ | | \ ___// /_/ > |\___ \ | | | | \// __ \| | | ( <_> ) | \
// |______/ \_______ \___ / |____|_ /\___ >___ /|__/____ > |__| |__| (____ /__| |__|\____/|___| /
// \/ \/ \/ \/_____/ \/ \/ \/
// ErrU2FRegistrationNotExist represents a "ErrU2FRegistrationNotExist" kind of error.
type ErrU2FRegistrationNotExist struct {
ID int64
}
func (err ErrU2FRegistrationNotExist) Error() string {
return fmt.Sprintf("U2F registration does not exist [id: %d]", err.ID)
}
// IsErrU2FRegistrationNotExist checks if an error is a ErrU2FRegistrationNotExist.
func IsErrU2FRegistrationNotExist(err error) bool {
_, ok := err.(ErrU2FRegistrationNotExist)
return ok
}
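For reference, the typed error added above is meant to be checked by callers rather than matched on its message. A minimal illustrative sketch (the helper and its package are hypothetical and not part of this diff; it only calls functions introduced here):

```go
package example // hypothetical caller package

import "code.gitea.io/gitea/models"

// deleteIfExists removes a U2F registration if it is still present and
// treats an already-missing key as success.
func deleteIfExists(id int64) error {
	reg, err := models.GetU2FRegistrationByID(id)
	if models.IsErrU2FRegistrationNotExist(err) {
		return nil // already gone, nothing to delete
	}
	if err != nil {
		return err
	}
	return models.DeleteRegistration(reg)
}
```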
// __________ .__
// \______ \ _______ _|__| ______ _ __
// | _// __ \ \/ / |/ __ \ \/ \/ /

View File

@ -0,0 +1,7 @@
-
id: 1
name: "U2F Key"
user_id: 1
counter: 0
created_unix: 946684800
updated_unix: 946684800

View File

@ -182,6 +182,8 @@ var migrations = []Migration{
NewMigration("add language column for user setting", addLanguageSetting),
// v64 -> v65
NewMigration("add multiple assignees", addMultipleAssignees),
// v65 -> v66
NewMigration("add u2f", addU2FReg),
// v66 -> v67
NewMigration("add review", addReview),
}

19
models/migrations/v65.go Normal file
View File

@ -0,0 +1,19 @@
package migrations
import (
"code.gitea.io/gitea/modules/util"
"github.com/go-xorm/xorm"
)
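// addU2FReg creates the u2f_registration table (xorm Sync2 adds any missing tables and columns).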
func addU2FReg(x *xorm.Engine) error {
type U2FRegistration struct {
ID int64 `xorm:"pk autoincr"`
Name string
UserID int64 `xorm:"INDEX"`
Raw []byte
Counter uint32
CreatedUnix util.TimeStamp `xorm:"INDEX created"`
UpdatedUnix util.TimeStamp `xorm:"INDEX updated"`
}
return x.Sync2(&U2FRegistration{})
}

View File

@ -120,6 +120,7 @@ func init() {
new(LFSLock),
new(Reaction),
new(IssueAssignees),
new(U2FRegistration),
new(Review),
)

120
models/u2f.go Normal file
View File

@ -0,0 +1,120 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/util"
"github.com/tstranex/u2f"
)
// U2FRegistration represents the registration data and counter of a security key
type U2FRegistration struct {
ID int64 `xorm:"pk autoincr"`
Name string
UserID int64 `xorm:"INDEX"`
Raw []byte
Counter uint32
CreatedUnix util.TimeStamp `xorm:"INDEX created"`
UpdatedUnix util.TimeStamp `xorm:"INDEX updated"`
}
// TableName returns a better table name for U2FRegistration
func (reg U2FRegistration) TableName() string {
return "u2f_registration"
}
// Parse will convert the db entry U2FRegistration to an u2f.Registration struct
func (reg *U2FRegistration) Parse() (*u2f.Registration, error) {
r := new(u2f.Registration)
return r, r.UnmarshalBinary(reg.Raw)
}
func (reg *U2FRegistration) updateCounter(e Engine) error {
_, err := e.ID(reg.ID).Cols("counter").Update(reg)
return err
}
// UpdateCounter will update the database value of counter
func (reg *U2FRegistration) UpdateCounter() error {
return reg.updateCounter(x)
}
// U2FRegistrationList is a list of *U2FRegistration
type U2FRegistrationList []*U2FRegistration
// ToRegistrations will convert all U2FRegistrations to u2f.Registrations
func (list U2FRegistrationList) ToRegistrations() []u2f.Registration {
regs := make([]u2f.Registration, 0, len(list))
for _, reg := range list {
r, err := reg.Parse()
if err != nil {
log.Error(4, "parsing u2f registration: %v", err)
continue
}
regs = append(regs, *r)
}
return regs
}
func getU2FRegistrationsByUID(e Engine, uid int64) (U2FRegistrationList, error) {
regs := make(U2FRegistrationList, 0)
return regs, e.Where("user_id = ?", uid).Find(&regs)
}
// GetU2FRegistrationByID returns U2F registration by id
func GetU2FRegistrationByID(id int64) (*U2FRegistration, error) {
return getU2FRegistrationByID(x, id)
}
func getU2FRegistrationByID(e Engine, id int64) (*U2FRegistration, error) {
reg := new(U2FRegistration)
if found, err := e.ID(id).Get(reg); err != nil {
return nil, err
} else if !found {
return nil, ErrU2FRegistrationNotExist{ID: id}
}
return reg, nil
}
// GetU2FRegistrationsByUID returns all U2F registrations of the given user
func GetU2FRegistrationsByUID(uid int64) (U2FRegistrationList, error) {
return getU2FRegistrationsByUID(x, uid)
}
func createRegistration(e Engine, user *User, name string, reg *u2f.Registration) (*U2FRegistration, error) {
raw, err := reg.MarshalBinary()
if err != nil {
return nil, err
}
r := &U2FRegistration{
UserID: user.ID,
Name: name,
Counter: 0,
Raw: raw,
}
_, err = e.InsertOne(r)
if err != nil {
return nil, err
}
return r, nil
}
// CreateRegistration will create a new U2FRegistration from the given Registration
func CreateRegistration(user *User, name string, reg *u2f.Registration) (*U2FRegistration, error) {
return createRegistration(x, user, name, reg)
}
// DeleteRegistration will delete U2FRegistration
func DeleteRegistration(reg *U2FRegistration) error {
return deleteRegistration(x, reg)
}
func deleteRegistration(e Engine, reg *U2FRegistration) error {
_, err := e.Delete(reg)
return err
}

61
models/u2f_test.go Normal file
View File

@ -0,0 +1,61 @@
package models
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tstranex/u2f"
)
func TestGetU2FRegistrationByID(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())
res, err := GetU2FRegistrationByID(1)
assert.NoError(t, err)
assert.Equal(t, "U2F Key", res.Name)
_, err = GetU2FRegistrationByID(342432)
assert.Error(t, err)
assert.True(t, IsErrU2FRegistrationNotExist(err))
}
func TestGetU2FRegistrationsByUID(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())
res, err := GetU2FRegistrationsByUID(1)
assert.NoError(t, err)
assert.Len(t, res, 1)
assert.Equal(t, "U2F Key", res[0].Name)
}
func TestU2FRegistration_TableName(t *testing.T) {
assert.Equal(t, "u2f_registration", U2FRegistration{}.TableName())
}
func TestU2FRegistration_UpdateCounter(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())
reg := AssertExistsAndLoadBean(t, &U2FRegistration{ID: 1}).(*U2FRegistration)
reg.Counter = 1
assert.NoError(t, reg.UpdateCounter())
AssertExistsIf(t, true, &U2FRegistration{ID: 1, Counter: 1})
}
func TestCreateRegistration(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())
user := AssertExistsAndLoadBean(t, &User{ID: 1}).(*User)
res, err := CreateRegistration(user, "U2F Created Key", &u2f.Registration{Raw: []byte("Test")})
assert.NoError(t, err)
assert.Equal(t, "U2F Created Key", res.Name)
assert.Equal(t, []byte("Test"), res.Raw)
AssertExistsIf(t, true, &U2FRegistration{Name: "U2F Created Key", UserID: user.ID})
}
func TestDeleteRegistration(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())
reg := AssertExistsAndLoadBean(t, &U2FRegistration{ID: 1}).(*U2FRegistration)
assert.NoError(t, DeleteRegistration(reg))
AssertNotExistsBean(t, &U2FRegistration{ID: 1})
}

View File

@ -211,3 +211,23 @@ type TwoFactorScratchAuthForm struct {
func (f *TwoFactorScratchAuthForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors {
return validate(errs, ctx.Data, f, ctx.Locale)
}
// U2FRegistrationForm for reserving an U2F name
type U2FRegistrationForm struct {
Name string `binding:"Required"`
}
// Validate validates the fields
func (f *U2FRegistrationForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors {
return validate(errs, ctx.Data, f, ctx.Locale)
}
// U2FDeleteForm for deleting U2F keys
type U2FDeleteForm struct {
ID int64 `binding:"Required"`
}
// Validate validates the fields
func (f *U2FDeleteForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors {
return validate(errs, ctx.Data, f, ctx.Locale)
}

View File

@ -521,6 +521,11 @@ var (
MaxResponseItems: 50,
}
U2F = struct {
AppID string
TrustedFacets []string
}{}
// I18n settings
Langs []string
Names []string
@ -1135,6 +1140,9 @@ func NewContext() {
IsInputFile: sec.Key("IS_INPUT_FILE").MustBool(false),
})
}
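// U2F settings: both values default to the instance URL without a trailing slash;
// TRUSTED_FACETS is parsed with shellquote, i.e. whitespace- or quote-separated.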
sec = Cfg.Section("U2F")
U2F.TrustedFacets, _ = shellquote.Split(sec.Key("TRUSTED_FACETS").MustString(strings.TrimRight(AppURL, "/")))
U2F.AppID = sec.Key("APP_ID").MustString(strings.TrimRight(AppURL, "/"))
}
// Service settings

View File

@ -8,6 +8,7 @@ Aleksejs Grocevs <aleksejs AT grocevs DOT pro>
Aleksey Tarakin <hukendo AT yandex DOT ru>
Alexander Steinhöfer <kontakt AT lx-s DOT de>
Alexandre Magno <alexandre DOT mbm AT gmail DOT com>
Anderi Azuki <k1nzy AT outlook DOT com>
Andrey Nering <andrey AT nering DOT com DOT br>
Andrey Solomatin <toadron AT yandex DOT ru>
Antoine GIRARD <sapk AT sapk DOT fr>

View File

@ -16,6 +16,7 @@ signed_in_as=Вписан като
username=Потребител
password=Парола
repository=Хранилище
organization=Организация
mirror=Огледало
@ -181,6 +182,7 @@ delete_token=Изтрий
delete_account=Изтриване на собствения профил
confirm_delete_account=Потвърди изтриването

View File

@ -16,6 +16,7 @@ signed_in_as=Přihlášen jako
username=Uživatelské jméno
password=Heslo
repository=Repozitář
organization=Organizace
mirror=Zrcadlo
@ -180,6 +181,7 @@ delete_token=Smazat
delete_account=Smazat váš účet
confirm_delete_account=Potvrdit smazání

View File

@ -31,6 +31,7 @@ twofa=Zwei-Faktor-Authentifizierung
twofa_scratch=Zwei-Faktor-Einmalpasswort
passcode=PIN
repository=Repository
organization=Organisation
mirror=Mirror
@ -306,11 +307,13 @@ form.name_pattern_not_allowed='%s' ist nicht erlaubt für Benutzernamen.
[settings]
profile=Profil
account=Account
password=Passwort
security=Sicherheit
avatar=Profilbild
ssh_gpg_keys=SSH / GPG Schlüssel
social=Soziale Konten
applications=Anwendungen
orgs=Organisationen verwalten
repos=Repositories
delete=Konto löschen
@ -447,6 +450,7 @@ then_enter_passcode=Und gebe dann die angezeigte PIN der Anwendung ein:
passcode_invalid=Die PIN ist falsch. Probiere es erneut.
twofa_enrolled=Die Zwei-Faktor-Authentifizierung wurde für dein Konto aktiviert. Bewahre dein Einmalpasswort (%s) an einem sicheren Ort auf, da es nicht wieder angezeigt werden wird.
manage_account_links=Verknüpfte Accounts verwalten
manage_account_links_desc=Diese externen Accounts sind mit deinem Gitea-Account verknüpft.
account_links_not_available=Es sind keine externen Accounts mit diesem Gitea-Account verknüpft.
@ -998,6 +1002,16 @@ settings.event_send_everything=Alle Events
settings.event_choose=Benutzerdefinierte Events…
settings.event_create=Erstellen
settings.event_create_desc=Branch oder Tag erstellt.
settings.event_delete=Löschen
settings.event_delete_desc=Branch/Tag gelöscht
settings.event_fork=Fork
settings.event_fork_desc=Repository geforkt
settings.event_issues=Issues
settings.event_issues_desc=Issue geöffnet, geschlossen, wieder geöffnet, bearbeitet, zugewiesen, nicht zugewiesen, Label aktualisiert, Label gelöscht, einem Meilenstein zugewiesen oder davon entfernt.
settings.event_issue_comment=Issue-Kommentar
settings.event_issue_comment_desc=Issue-Kommentar angelegt, geändert oder gelöscht.
settings.event_release=Release
settings.event_release_desc=Release in Repository veröffentlicht.
settings.event_pull_request=Pull-Request
settings.event_pull_request_desc=Pull-Request geöffnet, geschlossen, wieder geöffnet, bearbeitet, zugewiesen, nicht zugewiesen, Label aktualisiert, Label gelöscht oder synchronisiert.
settings.event_push=Push

View File

@ -31,6 +31,19 @@ twofa = Two-Factor Authentication
twofa_scratch = Two-Factor Scratch Code
passcode = Passcode
u2f_insert_key = Insert your security key
u2f_sign_in = Press the button on your security key. If you can't find a button, re-insert it.
u2f_press_button = Please press the button on your security key…
u2f_use_twofa = Use a two-factor code from your phone
u2f_error = We can't read your security key!
u2f_unsupported_browser = Your browser doesn't support U2F keys. Please try another browser.
u2f_error_1 = An unknown error occurred. Please retry.
u2f_error_2 = Please make sure that you're using an encrypted connection (https://) and visiting the correct URL.
u2f_error_3 = The server could not process your request.
u2f_error_4 = The presented key is not eligible for this request. If you try to register it, make sure that the key isn't already registered.
u2f_error_5 = Timeout reached before your key could be read. Please reload to retry.
u2f_reload = Reload
repository = Repository
organization = Organization
mirror = Mirror
@ -324,6 +337,7 @@ twofa = Two-Factor Authentication
account_link = Linked Accounts
organization = Organizations
uid = Uid
u2f = Security Keys
public_profile = Public Profile
profile_desc = Your email address will be used for notifications and other operations.
@ -453,6 +467,14 @@ then_enter_passcode = And enter the passcode shown in the application:
passcode_invalid = The passcode is incorrect. Try again.
twofa_enrolled = Your account has been enrolled into two-factor authentication. Store your scratch token (%s) in a safe place as it is only shown once!
u2f_desc = Security keys are hardware devices containing cryptographic keys. They can be used for two-factor authentication. The security key must support the <a href="https://fidoalliance.org/">FIDO U2F</a> standard.
u2f_require_twofa = Two-factor authentication must be enrolled in order to use security keys.
u2f_register_key = Add Security Key
u2f_nickname = Nickname
u2f_press_button = Press the button on your security key to register it.
u2f_delete_key = Remove Security Key
u2f_delete_key_desc = If you remove a security key you can no longer log in with it. Are you sure?
manage_account_links = Manage Linked Accounts
manage_account_links_desc = These external accounts are linked to your Gitea account.
account_links_not_available = There are currently no external accounts linked to your Gitea account.

View File

@ -19,6 +19,7 @@ username=Nombre de usuario
password=Contraseña
passcode=Contraseña
repository=Repositorio
organization=Organización
mirror=Réplica
@ -233,6 +234,7 @@ scan_this_image=Analiza esta imagen con la aplicación de autenticación:
or_enter_secret=O introduzca el secreto: %s
orgs_none=No eres miembro de ninguna organización.
repos_none=No posees ningún repositorio

View File

@ -18,6 +18,7 @@ username=Käyttäjätunnus
password=Salasana
passcode=Tunnuskoodi
repository=Repo
organization=Organisaatio
mirror=Peili
@ -180,6 +181,7 @@ twofa_disabled=Kaksivaiheinen todennus on otettu pois käytöstä.
delete_account=Poista tilisi
confirm_delete_account=Varmista poisto

View File

@ -19,6 +19,7 @@ username=Nom d'utilisateur
password=Mot de passe
passcode=Code d'accès
repository=Dépôt
organization=Organisation
mirror=Miroir
@ -236,6 +237,7 @@ scan_this_image=Scannez cette image avec votre application d'authentification :
or_enter_secret=Ou saisissez le code secret: %s
orgs_none=Vous n'êtes membre d'aucune organisation.
repos_none=Vous ne possédez aucun dépôt

View File

@ -20,6 +20,7 @@ username=Felhasználónév
password=Jelszó
passcode=Jelkód
repository=Tároló
organization=Szervezet
mirror=Tükör
@ -240,6 +241,7 @@ scan_this_image=Olvassa be ezt a hitelesítő alkalmazásával:
or_enter_secret=Vagy adja meg a titkot: %s
orgs_none=Nem tagja egy szervezetnek sem.
repos_none=Nincsen egyetlen saját tárolója sem

View File

@ -19,6 +19,7 @@ username=Nama Pengguna
password=Kata Sandi
passcode=Kode Akses
repository=Repositori
organization=Organisasi
mirror=Duplikat
@ -236,6 +237,7 @@ scan_this_image=Pindai gambar ini dengan aplikasi otentikasi:
or_enter_secret=Atau masukkan rahasia: %s
orgs_none=Anda bukan anggota dari organisasi apapun.
repos_none=Anda tidak memiliki repositori apapun

View File

@ -19,6 +19,7 @@ username=Nome utente
password=Password
passcode=Codice di sicurezza
repository=Repository
organization=Organizzazione
mirror=Mirror
@ -233,6 +234,7 @@ twofa_disabled=L'autenticazione a due fattori è stata disattivata.
or_enter_secret=O immettere il segreto: %s
repos_none=Non possiedi alcun repository
delete_account=Elimina Account

View File

@ -20,6 +20,7 @@ username=ユーザ名
password=パスワード
passcode=パスコード
repository=リポジトリ
organization=組織
mirror=ミラー
@ -240,6 +241,7 @@ scan_this_image=この画像を認証アプリケーションで読み取って
or_enter_secret=または、シークレットを入力してください: %s
orgs_none=あなたはどの組織のメンバーでもありません。
repos_none=あなたはリポジトリを所有していません。

View File

@ -19,6 +19,7 @@ username=사용자명
password=비밀번호
passcode=인증코드
repository=저장소
organization=조직
mirror=미러
@ -223,6 +224,7 @@ scan_this_image=이 이미지를 당신의 인증 애플리케이션에서 스
or_enter_secret=또는 이 비밀키를 입력하세요: %s
orgs_none=당신은 어떤 조직의 구성원도 아닙니다.
repos_none=어떤 레포지터리도 존재하지 않습니다.

View File

@ -18,6 +18,7 @@ username=Vartotojo vardas
password=Slaptažodis
passcode=PIN kodas
repository=Saugykla
organization=Organizacija
mirror=Veidrodis
@ -159,6 +160,7 @@ delete_token=Pašalinti
[repo]
repo_desc=Aprašymas
repo_lang=Kalba

View File

@ -20,6 +20,7 @@ username=Lietotājvārds
password=Parole
passcode=Kods
repository=Repozitorijs
organization=Organizācija
mirror=Spogulis
@ -240,6 +241,7 @@ scan_this_image=Noskenējiet šo attēlu ar autentifikācijas lietojumprogrammu:
or_enter_secret=Vai ievadiet šo noslēpumu: %s
orgs_none=Jūs neesat nevienas organizācijas biedrs.
repos_none=Jums nepieder neviens repozitorijs

View File

@ -19,6 +19,7 @@ username=Brukernavn
password=Passord
passcode=Kode
repository=Kodelager
organization=Organisasjon
mirror=Speil
@ -94,6 +95,7 @@ admin_password=Passord
[repo]

View File

@ -20,6 +20,7 @@ username=Gebruikersnaam
password=Wachtwoord
passcode=PIN
repository=Repository
organization=Organisatie
mirror=Kopie
@ -240,6 +241,7 @@ scan_this_image=Scan deze afbeelding met je authenticatie applicatie:
or_enter_secret=Of voer deze geheime code in: %s
orgs_none=U bent geen lid van een organisatie.
repos_none=U bezit geen repositories

View File

@ -5,6 +5,7 @@
[install]
@ -44,6 +45,7 @@
[repo]

View File

@ -18,6 +18,7 @@ username=Brukernavn
password=Passord
passcode=Kode
repository=Kodelager
organization=Organisasjon
mirror=Speil
@ -82,6 +83,7 @@ smtp_host=SMTP-vert
[repo]

View File

@ -19,6 +19,7 @@ username=Nazwa użytkownika
password=Hasło
passcode=Kod dostępu
repository=Repozytorium
organization=Organizacja
mirror=Kopia lustrzana
@ -236,6 +237,7 @@ scan_this_image=Zeskanuj ten obraz za pomocą swojej aplikacji uwierzytelniają
or_enter_secret=Lub wprowadź sekret: %s
orgs_none=Nie jesteś członkiem żadnej organizacji.
repos_none=Nie posiadasz żadnych repozytoriów

View File

@ -31,6 +31,7 @@ twofa=Autenticação de dois fatores
twofa_scratch=Código de backup da autenticação de dois fatores
passcode=Senha
repository=Repositório
organization=Organização
mirror=Espelho
@ -449,6 +450,7 @@ then_enter_passcode=E insira a senha mostrada no aplicativo:
passcode_invalid=Esse código de acesso é inválido. Tente novamente.
twofa_enrolled=Sua conta foi inscrita na autenticação de dois fatores. Armazene seu token de backup (%s) em um local seguro, pois ele é exibido apenas uma vez!
manage_account_links=Gerenciar contas vinculadas
manage_account_links_desc=Estas contas externas estão vinculadas a sua conta de Gitea.
account_links_not_available=Não existem contas externas atualmente vinculadas a esta conta.
@ -1000,6 +1002,16 @@ settings.event_send_everything=Todos os eventos
settings.event_choose=Eventos personalizados...
settings.event_create=Criar
settings.event_create_desc=Branch ou tag criado.
settings.event_delete=Excluir
settings.event_delete_desc=Branch ou tag excluída
settings.event_fork=Fork
settings.event_fork_desc=Feito fork do repositório
settings.event_issues=Issues
settings.event_issues_desc=Issue aberta, fechada, reaberta, editada, atribuída, desatribuída, etiqueta atualizada, etiqueta limpa, marco definido ou marco indefinido.
settings.event_issue_comment=Comentário da issue
settings.event_issue_comment_desc=Comentário da issue criado, editado ou excluído.
settings.event_release=Versão
settings.event_release_desc=Versão publicada em um repositório.
settings.event_pull_request=Pull request
settings.event_pull_request_desc=Pull request aberto, fechado, reaberto, atribuído, desatribuído, teve etiqueta atualizada ou limpada ou foi sincronizado.
settings.event_push=Push

View File

@ -31,6 +31,7 @@ twofa=Двухфакторная аутентификация
twofa_scratch=Двухфакторный scratch-код
passcode=Пароль
repository=Репозиторий
organization=Организация
mirror=Зеркало
@ -355,6 +356,7 @@ scan_this_image=Сканируйте это изображение вашим п
or_enter_secret=Или введите кодовое слово: %s
orgs_none=Вы не состоите ни в одной организации.
repos_none=Вы не владеете репозиториями

View File

@ -5,6 +5,7 @@
[install]
@ -44,6 +45,7 @@
[repo]

View File

@ -16,6 +16,7 @@ signed_in_as=Пријављени сте као
username=Корисничко име
password=Лозинка
repository=Спремиште
organization=Организација
mirror=Огледало
@ -180,6 +181,7 @@ delete_token=Уклони
delete_account=Уклоните ваш налог
confirm_delete_account=Потврдите брисање

View File

@ -19,6 +19,7 @@ username=Användarnamn
password=Lösenord
passcode=Kod
repository=Utvecklingskatalog
organization=Organisation
mirror=Spegel
@ -236,6 +237,7 @@ scan_this_image=Skanna denna bild med ditt autentiseringsprogram:
or_enter_secret=Eller skriv in följande sträng: %s
orgs_none=Du är inte en medlem i någon organisation.
repos_none=Du har inga utvecklingskataloger associerade med ditt konto
@ -328,6 +330,7 @@ commits.message=Meddelande
commits.date=Datum
commits.older=Äldre
commits.newer=Nyare
commits.signed_by=Signerad av
issues.new=Nytt Ärende
@ -339,6 +342,7 @@ issues.new.no_milestone=Ingen Milsten
issues.new.clear_milestone=Rensa milstenar
issues.new.open_milestone=Öppna Milstenar
issues.new.closed_milestone=Stängda Milstenar
issues.no_ref=Ingen branch/Tag specificerad
issues.create=Skapa Ärende
issues.new_label=Ny etikett
issues.create_label=Skapa Etikett
@ -348,6 +352,7 @@ issues.label_templates.fail_to_load_file=Laddning av etikettmallen '%s' misslyck
issues.add_milestone_at=`lade till denna till milstolpe <b>%s</b> %s`
issues.change_milestone_at='modifierade milstolpen från <b>%s</b> till <b>%s</b> %s'
issues.remove_milestone_at='tog bort denna från milstolpen <b>%s</b> %s'
issues.deleted_milestone=`(raderad)`
issues.self_assign_at=`tilldelade denna till sig själv %s`
issues.add_assignee_at=`blev tilldelad denna av <b>%s</b> %s`
issues.remove_assignee_at=`tog bort sin tilldelning %s`
@ -375,6 +380,8 @@ issues.action_close=Stäng
issues.action_label=Etikett
issues.action_milestone=Milsten
issues.action_milestone_no_select=Ingen Milsten
issues.action_assignee=Tilldelad
issues.action_assignee_no_select=Ingen tilldelad
issues.opened_by=öppnade %[1]s av <a href="%[2]s">%[3]s</a>
issues.opened_by_fake=öppnade %[1]s av %[2]s
issues.previous=Föregående
@ -413,6 +420,8 @@ issues.attachment.open_tab=`Klicka för att se "%s" i en ny flik`
issues.attachment.download=`Klicka för att hämta "%s"`
issues.subscribe=Prenumerera
issues.unsubscribe=Avsluta prenumerationen
issues.start_tracking_short=Starta
issues.start_tracking_history=`började arbeta %s`
issues.add_time_hours=Timmar
issues.add_time_minutes=Minuter

View File

@ -19,6 +19,7 @@ username=Kullanıcı Adı
password=Parola
passcode=Şifre
repository=Depo
organization=Organizasyon
mirror=Yansıma
@ -234,6 +235,7 @@ scan_this_image=Kim doğrulama uygulamanızla bu görüntüyü tarayın:
or_enter_secret=Veya gizli şeyi girin: %s
orgs_none=Herhangi bir organizasyonun bir üyesi değilsiniz.
repos_none=Herhangi bir depoya sahip değilsiniz

View File

@ -31,6 +31,7 @@ twofa=Двофакторна авторизація
twofa_scratch=Двофакторний одноразовий пароль
passcode=Код доступу
repository=Репозиторій
organization=Організація
mirror=Дзеркало
@ -80,6 +81,7 @@ err_empty_admin_password=Пароль адміністратора не може
general_title=Загальні налаштування
app_name=Назва сайту
app_name_helper=Тут ви можете ввести назву своєї компанії.
repo_path=Кореневий шлях репозиторія
repo_path_helper=Всі вилучені Git репозиторії будуть збережені в цей каталог.
lfs_path=Кореневої шлях Git LFS
@ -127,6 +129,7 @@ default_keep_email_private=Приховати адресу електронно
default_keep_email_private_popup=Приховати адресу електронної пошти нових облікових записів за замовчуванням.
default_allow_create_organization=Дозволити створення організацій за замовчуванням
default_enable_timetracking=Увімкнути відстеження часу за замовчуванням
default_enable_timetracking_popup=Включити відстеження часу для нових репозиторіїв за замовчуванням.
no_reply_address=Прихований поштовий домен
[home]
@ -344,7 +347,9 @@ ssh_disabled=SSH вимкнено
manage_social=Керувати зв'язаними обліковими записами соціальних мереж
unbind=Від'єднати
manage_access_token=Керування токенами доступу
generate_new_token=Згенерувати новий токен
tokens_desc=Ці токени надають доступ до вашого облікового запису за допомогою Gitea API.
token_name=Ім'я токену
generate_token=Згенерувати токен
delete_token=Видалити
@ -352,6 +357,7 @@ delete_token=Видалити
twofa_disable=Вимкнути двофакторну автентифікацію
or_enter_secret=Або введіть секрет: %s
manage_account_links=Керування обліковими записами
remove_account_link=Видалити облікові записи
@ -458,6 +464,7 @@ editor.new_branch_name_desc=Ім'я нової гілки…
editor.cancel=Відміна
editor.branch_already_exists=Гілка '%s' вже присутня в репозиторії.
editor.fail_to_update_file=Не вдалося оновити/створити файл '%s' через помилку: %v
editor.unable_to_upload_files=Не вдалося завантажити файли до '%s' через помилку: %v
editor.upload_files_to_dir=Завантажувати файли до '%s'
commits.commits=Коміти
@ -497,6 +504,7 @@ issues.add_milestone_at=`додав(ла) до <b>%s</b> етапу %s`
issues.deleted_milestone=`(видалено)`
issues.add_assignee_at=`був призначений <b>%s</b> %s`
issues.remove_assignee_at=`видалили із призначених %s`
issues.change_title_at=`змінив(ла) заголовок з <b>%s</b> на <b>%s</b> %s`
issues.open_tab=%d відкрито
issues.close_tab=%d закрито
issues.filter_label=Мітка
@ -557,8 +565,8 @@ issues.label_edit=Редагувати
issues.label_delete=Видалити
issues.label_modify=Редагувати мітку
issues.label_deletion=Видалити мітку
issues.label.filter_sort.alphabetically=За абеткою
issues.label.filter_sort.reverse_alphabetically=Зворотною абеткою
issues.label.filter_sort.alphabetically=За алфавітом
issues.label.filter_sort.reverse_alphabetically=З кінця алфавіту
issues.label.filter_sort.by_size=Розмір
issues.label.filter_sort.reverse_by_size=Зворотний розмір
issues.num_participants=%d учасників
@ -578,7 +586,7 @@ issues.add_time_hours=Години
issues.add_time_minutes=Хвилини
issues.add_time_sum_to_small=Час не введено.
issues.cancel_tracking=Відміна
issues.due_date=Термін дії
issues.due_date=Дата завершення
issues.due_date_form_add=Додати дату завершення
issues.due_date_form_remove=Видалити дату завершення
issues.due_date_not_set=Термін виконання не встановлений.
@ -589,6 +597,7 @@ pulls.compare_base=злити в
pulls.compare_compare=pull з
pulls.filter_branch=Фільтр по гілці
pulls.no_results=Результатів не знайдено.
pulls.nothing_to_compare=Ці гілки однакові. Немає необхідності створювати запитів на злиття.
pulls.create=Створити запит на злиття
pulls.title_desc=хоче злити %[1]d комітів з <code>%[2]s</code> до <code>%[3]s</code>
pulls.merged_title_desc=злито %[1]d комітів з <code>%[2]s</code> до <code>%[3]s</code> %[4]s
@ -600,6 +609,7 @@ pulls.merged=Злито
pulls.has_merged=Запит на злиття було об'єднано.
pulls.can_auto_merge_desc=Цей запит можна об'єднати автоматично.
pulls.merge_pull_request=Об'єднати запит на злиття
pulls.squash_merge_pull_request=Об'єднати і злити
milestones.new=Новий етап
milestones.open_tab=%d відкрито
@ -694,12 +704,15 @@ settings.wiki_desc=Увімкнути репозиторії Wiki
settings.use_internal_wiki=Використовувати вбудовані Wiki
settings.use_external_wiki=Використовувати зовнішні Wiki
settings.external_wiki_url=URL зовнішньої wiki
settings.issues_desc=Увімкнути відстеження проблем в репозиторію
settings.external_tracker_url=URL зовнішньої системи відстеження проблем
settings.tracker_url_format=Формат URL зовнішнього трекера задач
settings.tracker_issue_style.numeric=Цифровий
settings.tracker_issue_style.alphanumeric=Буквено-цифровий
settings.enable_timetracker=Увімкнути відстеження часу
settings.pulls.allow_squash_commits=Увімкнути об'єднувати коміти перед злиттям
settings.admin_settings=Налаштування адміністратора
settings.admin_enable_health_check=Включити перевірки працездатності репозиторію (git fsck)
settings.danger_zone=Небезпечна зона
settings.new_owner_has_same_repo=Новий власник вже має репозиторій з такою назвою. Будь ласка, виберіть інше ім'я.
settings.convert=Перетворити на звичайний репозиторій

View File

@ -31,6 +31,7 @@ twofa=两步验证
twofa_scratch=两步验证口令
passcode=验证码
repository=仓库
organization=组织
mirror=镜像
@ -449,6 +450,7 @@ then_enter_passcode=并输入应用程序中显示的密码:
passcode_invalid=密码不正确。再试一次。
twofa_enrolled=你的账号已经启用了两步验证。请保存初始令牌(%s到一个安全的地方此令牌仅当前显示一次。
manage_account_links=管理绑定过的账号
manage_account_links_desc=这些外部帐户已经绑定到您的 Gitea 帐户。
account_links_not_available=当前没有与您的 Gitea 帐户绑定的外部帐户。

View File

@ -17,6 +17,7 @@ notifications=訊息
password=密碼
passcode=驗證碼
repository=儲存庫
organization=組織
mirror=鏡像
@ -228,6 +229,7 @@ scan_this_image=使用您的授權應用程式來掃瞄圖片:
or_enter_secret=或者輸入密碼: %s
orgs_none=您尚未成為任一組織的成員。
repos_none=您不擁有任何存儲庫

View File

@ -19,6 +19,7 @@ username=用戶名稱
password=密碼
passcode=驗證碼
repository=儲存庫
organization=組織
mirror=鏡像
@ -236,6 +237,7 @@ scan_this_image=使用您的授權應用程式來掃瞄圖片:
or_enter_secret=或者輸入密碼: %s
orgs_none=您尚未成為任一組織的成員。
repos_none=您不擁有任何存儲庫

View File

@ -1513,6 +1513,130 @@ function initCodeView() {
}
}
function initU2FAuth() {
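// Login flow: fetch a sign challenge from the server, let the security key sign it,
// then POST the signed response back; if U2F is unavailable, fall back to the TOTP page.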
if($('#wait-for-key').length === 0) {
return
}
u2fApi.ensureSupport()
.then(function () {
$.getJSON('/user/u2f/challenge').success(function(req) {
u2fApi.sign(req.appId, req.challenge, req.registeredKeys, 30)
.then(u2fSigned)
.catch(function (err) {
if(err === undefined) {
u2fError(1);
return
}
u2fError(err.metaData.code);
});
});
}).catch(function () {
// Fall back in case the browser does not support U2F
window.location.href = "/user/two_factor"
})
}
function u2fSigned(resp) {
$.ajax({
url:'/user/u2f/sign',
type:"POST",
headers: {"X-Csrf-Token": csrf},
data: JSON.stringify(resp),
contentType:"application/json; charset=utf-8",
}).done(function(res){
window.location.replace(res);
}).fail(function (xhr, textStatus) {
u2fError(1);
});
}
function u2fRegistered(resp) {
if (checkError(resp)) {
return;
}
$.ajax({
url:'/user/settings/security/u2f/register',
type:"POST",
headers: {"X-Csrf-Token": csrf},
data: JSON.stringify(resp),
contentType:"application/json; charset=utf-8",
success: function(){
window.location.reload();
},
error: function (xhr, textStatus) {
u2fError(1);
}
});
}
function checkError(resp) {
if (!('errorCode' in resp)) {
return false;
}
if (resp.errorCode === 0) {
return false;
}
u2fError(resp.errorCode);
return true;
}
function u2fError(errorType) {
var u2fErrors = {
'browser': $('#unsupported-browser'),
1: $('#u2f-error-1'),
2: $('#u2f-error-2'),
3: $('#u2f-error-3'),
4: $('#u2f-error-4'),
5: $('.u2f-error-5')
};
u2fErrors[errorType].removeClass('hide');
for(var type in u2fErrors){
if(type != errorType){
u2fErrors[type].addClass('hide');
}
}
$('#u2f-error').modal('show');
}
function initU2FRegister() {
$('#register-device').modal({allowMultiple: false});
$('#u2f-error').modal({allowMultiple: false});
$('#register-security-key').on('click', function(e) {
e.preventDefault();
u2fApi.ensureSupport()
.then(u2fRegisterRequest)
.catch(function() {
u2fError('browser');
})
})
}
function u2fRegisterRequest() {
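// Registration flow: reserve the chosen nickname server-side, then ask the key to answer
// the returned register request and hand the response to u2fRegistered().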
$.post("/user/settings/security/u2f/request_register", {
"_csrf": csrf,
"name": $('#nickname').val()
}).success(function(req) {
$("#nickname").closest("div.field").removeClass("error");
$('#register-device').modal('show');
if(req.registeredKeys === null) {
req.registeredKeys = []
}
u2fApi.register(req.appId, req.registerRequests, req.registeredKeys, 30)
.then(u2fRegistered)
.catch(function (reason) {
if(reason === undefined) {
u2fError(1);
return
}
u2fError(reason.metaData.code);
});
}).fail(function(xhr, status, error) {
if(xhr.status === 409) {
$("#nickname").closest("div.field").addClass("error");
}
});
}
$(document).ready(function () {
csrf = $('meta[name=_csrf]').attr("content");
suburl = $('meta[name=_suburl]').attr("content");
@ -1724,6 +1848,8 @@ $(document).ready(function () {
initCtrlEnterSubmit();
initNavbarContentToggle();
initTopicbar();
initU2FAuth();
initU2FRegister();
initPullRequestReview();
// Repo clone url.

View File

@ -110,6 +110,11 @@
<td><a href="https://github.com/mozilla/pdf.js/blob/master/LICENSE">Apache-2.0-only</a></td>
<td><a href="https://github.com/mozilla/pdf.js/archive/v1.4.20.tar.gz">pdf.js-v1.4.20.tar.gz</a></td>
</tr>
<tr>
<td><a href="/vendor/plugins/u2f/">u2f-api</a></td>
<td><a href="https://github.com/go-gitea/u2f-api/blob/master/LICENSE">Expat</a></td>
<td><a href="https://github.com/go-gitea/u2f-api/archive/v1.0.8.zip">u2f-api-1.0.8.zip</a></td>
</tr>
<tr>
<td><a href="/vendor/assets/font-awesome/fonts/">font-awesome - fonts</a></td>
<td><a href="http://fontawesome.io/license/">OFL</a></td>

1
public/vendor/plugins/u2f/index.js vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -5,6 +5,8 @@
package routes
import (
"encoding/gob"
"net/http"
"os"
"path"
"time"
@ -37,12 +39,13 @@ import (
"github.com/go-macaron/i18n"
"github.com/go-macaron/session"
"github.com/go-macaron/toolbox"
"github.com/tstranex/u2f"
"gopkg.in/macaron.v1"
"net/http"
)
// NewMacaron initializes Macaron instance.
func NewMacaron() *macaron.Macaron {
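// u2f.Challenge values are stored in the session; some session backends gob-encode
// their data, so the concrete type has to be registered first.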
gob.Register(&u2f.Challenge{})
m := macaron.New()
if !setting.DisableRouterLog {
m.Use(macaron.Logger())
@ -214,6 +217,12 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/scratch", user.TwoFactorScratch)
m.Post("/scratch", bindIgnErr(auth.TwoFactorScratchAuthForm{}), user.TwoFactorScratchPost)
})
m.Group("/u2f", func() {
m.Get("", user.U2F)
m.Get("/challenge", user.U2FChallenge)
m.Post("/sign", bindIgnErr(u2f.SignResponse{}), user.U2FSign)
})
}, reqSignOut)
m.Group("/user/settings", func() {
@ -235,6 +244,11 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/enroll", userSetting.EnrollTwoFactor)
m.Post("/enroll", bindIgnErr(auth.TwoFactorAuthForm{}), userSetting.EnrollTwoFactorPost)
})
m.Group("/u2f", func() {
m.Post("/request_register", bindIgnErr(auth.U2FRegistrationForm{}), userSetting.U2FRegister)
m.Post("/register", bindIgnErr(u2f.RegisterResponse{}), userSetting.U2FRegisterPost)
m.Post("/delete", bindIgnErr(auth.U2FDeleteForm{}), userSetting.U2FDelete)
})
m.Group("/openid", func() {
m.Post("", bindIgnErr(auth.AddOpenIDForm{}), userSetting.OpenIDPost)
m.Post("/delete", userSetting.DeleteOpenID)

View File

@ -21,6 +21,7 @@ import (
"github.com/go-macaron/captcha"
"github.com/markbates/goth"
"github.com/tstranex/u2f"
)
const (
@ -35,6 +36,7 @@ const (
tplTwofa base.TplName = "user/auth/twofa"
tplTwofaScratch base.TplName = "user/auth/twofa_scratch"
tplLinkAccount base.TplName = "user/auth/link_account"
tplU2F base.TplName = "user/auth/u2f"
)
// AutoSignIn reads cookie and try to auto-login.
@ -159,7 +161,6 @@ func SignInPost(ctx *context.Context, form auth.SignInForm) {
}
return
}
// If this user is enrolled in 2FA, we can't sign the user in just yet.
// Instead, redirect them to the 2FA authentication page.
_, err = models.GetTwoFactorByUID(u.ID)
@ -175,6 +176,13 @@ func SignInPost(ctx *context.Context, form auth.SignInForm) {
// User needs to use 2FA, save data and redirect to 2FA page.
ctx.Session.Set("twofaUid", u.ID)
ctx.Session.Set("twofaRemember", form.Remember)
regs, err := models.GetU2FRegistrationsByUID(u.ID)
if err == nil && len(regs) > 0 {
ctx.Redirect(setting.AppSubURL + "/user/u2f")
return
}
ctx.Redirect(setting.AppSubURL + "/user/two_factor")
}
@ -317,12 +325,115 @@ func TwoFactorScratchPost(ctx *context.Context, form auth.TwoFactorScratchAuthFo
ctx.RenderWithErr(ctx.Tr("auth.twofa_scratch_token_incorrect"), tplTwofaScratch, auth.TwoFactorScratchAuthForm{})
}
// U2F shows the U2F login page
func U2F(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("twofa")
ctx.Data["RequireU2F"] = true
// Check auto-login.
if checkAutoLogin(ctx) {
return
}
// Ensure user is in a 2FA session.
if ctx.Session.Get("twofaUid") == nil {
ctx.ServerError("UserSignIn", errors.New("not in U2F session"))
return
}
ctx.HTML(200, tplU2F)
}
// U2FChallenge submits a sign challenge to the browser
func U2FChallenge(ctx *context.Context) {
// Ensure user is in a U2F session.
idSess := ctx.Session.Get("twofaUid")
if idSess == nil {
ctx.ServerError("UserSignIn", errors.New("not in U2F session"))
return
}
id := idSess.(int64)
regs, err := models.GetU2FRegistrationsByUID(id)
if err != nil {
ctx.ServerError("UserSignIn", err)
return
}
if len(regs) == 0 {
ctx.ServerError("UserSignIn", errors.New("no device registered"))
return
}
challenge, err := u2f.NewChallenge(setting.U2F.AppID, setting.U2F.TrustedFacets)
if err != nil {
ctx.ServerError("u2f.NewChallenge", err)
return
}
if err = ctx.Session.Set("u2fChallenge", challenge); err != nil {
ctx.ServerError("UserSignIn", err)
return
}
ctx.JSON(200, challenge.SignRequest(regs.ToRegistrations()))
}
// U2FSign authenticates the user by signResp
func U2FSign(ctx *context.Context, signResp u2f.SignResponse) {
challSess := ctx.Session.Get("u2fChallenge")
idSess := ctx.Session.Get("twofaUid")
if challSess == nil || idSess == nil {
ctx.ServerError("UserSignIn", errors.New("not in U2F session"))
return
}
challenge := challSess.(*u2f.Challenge)
id := idSess.(int64)
regs, err := models.GetU2FRegistrationsByUID(id)
if err != nil {
ctx.ServerError("UserSignIn", err)
return
}
for _, reg := range regs {
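// Try every registered key: Authenticate verifies the signature against the stored
// registration and returns the incremented usage counter on success.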
r, err := reg.Parse()
if err != nil {
log.Error(4, "parsing u2f registration: %v", err)
continue
}
newCounter, authErr := r.Authenticate(signResp, *challenge, reg.Counter)
if authErr == nil {
reg.Counter = newCounter
user, err := models.GetUserByID(id)
if err != nil {
ctx.ServerError("UserSignIn", err)
return
}
remember := ctx.Session.Get("twofaRemember").(bool)
if err := reg.UpdateCounter(); err != nil {
ctx.ServerError("UserSignIn", err)
return
}
if ctx.Session.Get("linkAccount") != nil {
gothUser := ctx.Session.Get("linkAccountGothUser")
if gothUser == nil {
ctx.ServerError("UserSignIn", errors.New("not in LinkAccount session"))
return
}
err = models.LinkAccountToUser(user, gothUser.(goth.User))
if err != nil {
ctx.ServerError("UserSignIn", err)
return
}
}
redirect := handleSignInFull(ctx, user, remember, false)
if redirect == "" {
redirect = setting.AppSubURL + "/"
}
ctx.PlainText(200, []byte(redirect))
return
}
}
ctx.Error(401)
}
// This handles the final part of the sign-in process of the user.
func handleSignIn(ctx *context.Context, u *models.User, remember bool) {
handleSignInFull(ctx, u, remember, true)
}
func handleSignInFull(ctx *context.Context, u *models.User, remember bool, obeyRedirect bool) {
func handleSignInFull(ctx *context.Context, u *models.User, remember bool, obeyRedirect bool) string {
if remember {
days := 86400 * setting.LogInRememberDays
ctx.SetCookie(setting.CookieUserName, u.Name, days, setting.AppSubURL)
@ -336,6 +447,8 @@ func handleSignInFull(ctx *context.Context, u *models.User, remember bool, obeyR
ctx.Session.Delete("openid_determined_username")
ctx.Session.Delete("twofaUid")
ctx.Session.Delete("twofaRemember")
ctx.Session.Delete("u2fChallenge")
ctx.Session.Delete("linkAccount")
ctx.Session.Set("uid", u.ID)
ctx.Session.Set("uname", u.Name)
@ -345,7 +458,7 @@ func handleSignInFull(ctx *context.Context, u *models.User, remember bool, obeyR
u.Language = ctx.Locale.Language()
if err := models.UpdateUserCols(u, "language"); err != nil {
log.Error(4, fmt.Sprintf("Error updating user language [user: %d, locale: %s]", u.ID, u.Language))
return
return setting.AppSubURL + "/"
}
}
@ -358,7 +471,7 @@ func handleSignInFull(ctx *context.Context, u *models.User, remember bool, obeyR
u.SetLastLogin()
if err := models.UpdateUserCols(u, "last_login_unix"); err != nil {
ctx.ServerError("UpdateUserCols", err)
return
return setting.AppSubURL + "/"
}
if redirectTo, _ := url.QueryUnescape(ctx.GetCookie("redirect_to")); len(redirectTo) > 0 {
@ -366,12 +479,13 @@ func handleSignInFull(ctx *context.Context, u *models.User, remember bool, obeyR
if obeyRedirect {
ctx.RedirectToFirst(redirectTo)
}
return
return redirectTo
}
if obeyRedirect {
ctx.Redirect(setting.AppSubURL + "/")
}
return setting.AppSubURL + "/"
}
// SignInOAuth handles the OAuth2 login buttons
@ -467,6 +581,14 @@ func handleOAuth2SignIn(u *models.User, gothUser goth.User, ctx *context.Context
// User needs to use 2FA, save data and redirect to 2FA page.
ctx.Session.Set("twofaUid", u.ID)
ctx.Session.Set("twofaRemember", false)
// If U2F is enrolled -> Redirect to U2F instead
regs, err := models.GetU2FRegistrationsByUID(u.ID)
if err == nil && len(regs) > 0 {
ctx.Redirect(setting.AppSubURL + "/user/u2f")
return
}
ctx.Redirect(setting.AppSubURL + "/user/two_factor")
}
@ -593,6 +715,13 @@ func LinkAccountPostSignIn(ctx *context.Context, signInForm auth.SignInForm) {
ctx.Session.Set("twofaRemember", signInForm.Remember)
ctx.Session.Set("linkAccount", true)
// If U2F is enrolled -> Redirect to U2F instead
regs, err := models.GetU2FRegistrationsByUID(u.ID)
if err == nil && len(regs) > 0 {
ctx.Redirect(setting.AppSubURL + "/user/u2f")
return
}
ctx.Redirect(setting.AppSubURL + "/user/two_factor")
}

View File

@ -118,7 +118,7 @@ func UpdateAvatarSetting(ctx *context.Context, form auth.AvatarForm, ctxUser *mo
ctxUser.AvatarEmail = form.Gravatar
}
if form.Avatar != nil {
if form.Avatar.Filename != "" {
fr, err := form.Avatar.Open()
if err != nil {
return fmt.Errorf("Avatar.Open: %v", err)

View File

@ -33,6 +33,14 @@ func Security(ctx *context.Context) {
}
}
ctx.Data["TwofaEnrolled"] = enrolled
if enrolled {
ctx.Data["U2FRegistrations"], err = models.GetU2FRegistrationsByUID(ctx.User.ID)
if err != nil {
ctx.ServerError("GetU2FRegistrationsByUID", err)
return
}
ctx.Data["RequireU2F"] = true
}
tokens, err := models.ListAccessTokens(ctx.User.ID)
if err != nil {

View File

@ -0,0 +1,99 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package setting
import (
"errors"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/setting"
"github.com/tstranex/u2f"
)
// U2FRegister initializes the u2f registration procedure
func U2FRegister(ctx *context.Context, form auth.U2FRegistrationForm) {
if form.Name == "" {
ctx.Error(409)
return
}
challenge, err := u2f.NewChallenge(setting.U2F.AppID, setting.U2F.TrustedFacets)
if err != nil {
ctx.ServerError("NewChallenge", err)
return
}
err = ctx.Session.Set("u2fChallenge", challenge)
if err != nil {
ctx.ServerError("Session.Set", err)
return
}
regs, err := models.GetU2FRegistrationsByUID(ctx.User.ID)
if err != nil {
ctx.ServerError("GetU2FRegistrationsByUID", err)
return
}
for _, reg := range regs {
if reg.Name == form.Name {
ctx.Error(409, "Name already taken")
return
}
}
ctx.Session.Set("u2fName", form.Name)
ctx.JSON(200, u2f.NewWebRegisterRequest(challenge, regs.ToRegistrations()))
}
// U2FRegisterPost receives the response of the security key
func U2FRegisterPost(ctx *context.Context, response u2f.RegisterResponse) {
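// Validate the browser's RegisterResponse against the challenge saved in the session
// and persist the new key under the name reserved earlier.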
challSess := ctx.Session.Get("u2fChallenge")
u2fName := ctx.Session.Get("u2fName")
if challSess == nil || u2fName == nil {
ctx.ServerError("U2FRegisterPost", errors.New("not in U2F session"))
return
}
challenge := challSess.(*u2f.Challenge)
name := u2fName.(string)
config := &u2f.Config{
// Chrome 66+ doesn't return the device's attestation
// certificate by default.
SkipAttestationVerify: true,
}
reg, err := u2f.Register(response, *challenge, config)
if err != nil {
ctx.ServerError("u2f.Register", err)
return
}
if _, err = models.CreateRegistration(ctx.User, name, reg); err != nil {
ctx.ServerError("u2f.Register", err)
return
}
ctx.Status(200)
}
// U2FDelete deletes a security key by ID
func U2FDelete(ctx *context.Context, form auth.U2FDeleteForm) {
reg, err := models.GetU2FRegistrationByID(form.ID)
if err != nil {
if models.IsErrU2FRegistrationNotExist(err) {
ctx.Status(200)
return
}
ctx.ServerError("GetU2FRegistrationByID", err)
return
}
if reg.UserID != ctx.User.ID {
ctx.Status(401)
return
}
if err := models.DeleteRegistration(reg); err != nil {
ctx.ServerError("DeleteRegistration", err)
return
}
ctx.JSON(200, map[string]interface{}{
"redirect": setting.AppSubURL + "/user/settings/security",
})
return
}

View File

@ -64,6 +64,9 @@
{{if .RequireDropzone}}
<script src="{{AppSubUrl}}/vendor/plugins/dropzone/dropzone.js"></script>
{{end}}
{{if .RequireU2F}}
<script src="{{AppSubUrl}}/vendor/plugins/u2f/index.js"></script>
{{end}}
{{if .RequireTribute}}
<script src="{{AppSubUrl}}/vendor/plugins/tribute/tribute.min.js"></script>

View File

@ -0,0 +1,22 @@
{{template "base/head" .}}
<div class="user signin">
<div class="ui middle centered very relaxed page grid">
<div class="column">
<h3 class="ui top attached header">
{{.i18n.Tr "twofa"}}
</h3>
<div class="ui attached segment">
<i class="huge key icon"></i>
<h3>{{.i18n.Tr "u2f_insert_key"}}</h3>
{{template "base/alert" .}}
<p>{{.i18n.Tr "u2f_sign_in"}}</p>
</div>
<div id="wait-for-key" class="ui attached segment"><div class="ui active indeterminate inline loader"></div> {{.i18n.Tr "u2f_press_button"}} </div>
<div class="ui attached segment">
<a href="/user/two_factor">{{.i18n.Tr "u2f_use_twofa"}}</a>
</div>
</div>
</div>
</div>
{{template "user/auth/u2f_error" .}}
{{template "base/footer" .}}

View File

@ -0,0 +1,32 @@
<div class="ui small modal" id="u2f-error">
<div class="header">{{.i18n.Tr "u2f_error"}}</div>
<div class="content">
<div class="ui negative message">
<div class="header">
{{.i18n.Tr "u2f_error"}}
</div>
<div class="hide" id="unsupported-browser">
{{.i18n.Tr "u2f_unsupported_browser"}}
</div>
<div class="hide" id="u2f-error-1">
{{.i18n.Tr "u2f_error_1"}}
</div>
<div class="hide" id="u2f-error-2">
{{.i18n.Tr "u2f_error_2"}}
</div>
<div class="hide" id="u2f-error-3">
{{.i18n.Tr "u2f_error_3"}}
</div>
<div class="hide" id="u2f-error-4">
{{.i18n.Tr "u2f_error_4"}}
</div>
<div class="hide u2f-error-5">
{{.i18n.Tr "u2f_error_5"}}
</div>
</div>
</div>
<div class="actions">
<button onclick="window.location.reload()" class="success ui button hide u2f-error-5">{{.i18n.Tr "u2f_reload"}}</button>
<div class="ui cancel button">{{.i18n.Tr "cancel"}}</div>
</div>
</div>

View File

@ -8,7 +8,7 @@
</h4>
<div class="ui attached segment">
{{if or (.SignedUser.IsLocal) (.SignedUser.IsOAuth2)}}
<form class="ui form" action="{{.Link}}?tp=password" method="post">
<form class="ui form" action="{{AppSubUrl}}/user/settings/account" method="post">
{{.CsrfTokenHtml}}
{{if .SignedUser.IsPasswordSet}}
<div class="required field {{if .Err_OldPassword}}error{{end}}">
@ -49,13 +49,13 @@
<div class="item">
{{if not .IsPrimary}}
<div class="right floated content">
<button class="ui red tiny button delete-button" id="delete-email" data-url="{{$.Link}}/email/delete" data-id="{{.ID}}">
<button class="ui red tiny button delete-button" id="delete-email" data-url="{{AppSubUrl}}/user/settings/account/email/delete" data-id="{{.ID}}">
{{$.i18n.Tr "settings.delete_email"}}
</button>
</div>
{{if .IsActivated}}
<div class="right floated content">
<form action="{{$.Link}}/email" method="post">
<form action="{{AppSubUrl}}/user/settings/account/email" method="post">
{{$.CsrfTokenHtml}}
<input name="_method" type="hidden" value="PRIMARY">
<input name="id" type="hidden" value="{{.ID}}">
@ -73,7 +73,7 @@
</div>
</div>
<div class="ui attached bottom segment">
<form class="ui form" action="{{.Link}}/email" method="post">
<form class="ui form" action="{{AppSubUrl}}/user/settings/account/email" method="post">
{{.CsrfTokenHtml}}
<div class="required field {{if .Err_Email}}error{{end}}">
<label for="email">{{.i18n.Tr "settings.add_new_email"}}</label>
@ -92,7 +92,7 @@
<div class="ui red message">
<p class="text left"><i class="octicon octicon-alert"></i> {{.i18n.Tr "settings.delete_prompt" | Str2html}}</p>
</div>
<form class="ui form ignore-dirty" id="delete-form" action="{{.Link}}/delete" method="post">
<form class="ui form ignore-dirty" id="delete-form" action="{{AppSubUrl}}/user/settings/account/delete" method="post">
{{.CsrfTokenHtml}}
<input class="fake" type="password">
<div class="required field {{if .Err_Password}}error{{end}}">

View File

@ -4,6 +4,7 @@
<div class="ui container">
{{template "base/alert" .}}
{{template "user/settings/security_twofa" .}}
{{template "user/settings/security_u2f" .}}
{{template "user/settings/security_accountlinks" .}}
{{if .EnableOpenIDSignIn}}
{{template "user/settings/security_openid" .}}

View File

@ -10,7 +10,7 @@
{{range $loginSource, $provider := .AccountLinks}}
<div class="item">
<div class="right floated content">
<button class="ui red tiny button delete-button" id="delete-account-link" data-url="{{$.Link}}/account_link" data-id="{{$loginSource.ID}}">
<button class="ui red tiny button delete-button" id="delete-account-link" data-url="{{AppSubUrl}}/user/settings/security/account_link" data-id="{{$loginSource.ID}}">
{{$.i18n.Tr "settings.delete_key"}}
</button>
</div>

View File

@ -9,12 +9,12 @@
{{range .OpenIDs}}
<div class="item">
<div class="right floated content">
<button class="ui red tiny button delete-button" id="delete-openid" data-url="{{$.Link}}/openid/delete" data-id="{{.ID}}">
<button class="ui red tiny button delete-button" id="delete-openid" data-url="{{AppSubUrl}}/user/settings/security/openid/delete" data-id="{{.ID}}">
{{$.i18n.Tr "settings.delete_key"}}
</button>
</div>
<div class="right floated content">
<form action="{{$.Link}}/openid/toggle_visibility" method="post">
<form action="{{AppSubUrl}}/user/settings/security/openid/toggle_visibility" method="post">
{{$.CsrfTokenHtml}}
<input name="id" type="hidden" value="{{.ID}}">
{{if .Show}}
@ -39,11 +39,11 @@
</div>
</div>
<div class="ui attached bottom segment">
<form class="ui form" action="{{.Link}}/openid" method="post">
<form class="ui form" action="{{AppSubUrl}}/user/settings/security/openid" method="post">
{{.CsrfTokenHtml}}
<div class="required field {{if .Err_OpenID}}error{{end}}">
<label for="openid">{{.i18n.Tr "settings.add_new_openid"}}</label>
<input id="openid" name="openid" type="text" autofocus required>
<input id="openid" name="openid" type="text" required>
</div>
<button class="ui green button">
{{.i18n.Tr "settings.add_openid"}}

View File

@ -5,12 +5,12 @@
<p>{{.i18n.Tr "settings.twofa_desc"}}</p>
{{if .TwofaEnrolled}}
<p>{{$.i18n.Tr "settings.twofa_is_enrolled" | Str2html }}</p>
<form class="ui form" action="{{.Link}}/two_factor/regenerate_scratch" method="post" enctype="multipart/form-data">
<form class="ui form" action="{{AppSubUrl}}/user/settings/security/two_factor/regenerate_scratch" method="post" enctype="multipart/form-data">
{{.CsrfTokenHtml}}
<p>{{.i18n.Tr "settings.regenerate_scratch_token_desc"}}</p>
<button class="ui blue button">{{$.i18n.Tr "settings.twofa_scratch_token_regenerate"}}</button>
</form>
<form class="ui form" action="{{.Link}}/two_factor/disable" method="post" enctype="multipart/form-data" id="disable-form">
<form class="ui form" action="{{AppSubUrl}}/user/settings/security/two_factor/disable" method="post" enctype="multipart/form-data" id="disable-form">
{{.CsrfTokenHtml}}
<p>{{.i18n.Tr "settings.twofa_disable_note"}}</p>
<div class="ui red button delete-button" id="disable-twofa" data-type="form" data-form="#disable-form">{{$.i18n.Tr "settings.twofa_disable"}}</div>
@ -18,7 +18,7 @@
{{else}}
<p>{{.i18n.Tr "settings.twofa_not_enrolled"}}</p>
<div class="inline field">
<a class="ui green button" href="{{.Link}}/two_factor/enroll">{{$.i18n.Tr "settings.twofa_enroll"}}</a>
<a class="ui green button" href="{{AppSubUrl}}/user/settings/security/two_factor/enroll">{{$.i18n.Tr "settings.twofa_enroll"}}</a>
</div>
{{end}}
</div>

View File

@ -0,0 +1,56 @@
<h4 class="ui top attached header">
{{.i18n.Tr "settings.u2f"}}
</h4>
<div class="ui attached segment">
<p>{{.i18n.Tr "settings.u2f_desc" | Str2html}}</p>
{{if .TwofaEnrolled}}
<div class="ui key list">
{{range .U2FRegistrations}}
<div class="item">
<div class="right floated content">
<button class="ui red tiny button delete-button" id="delete-registration" data-url="{{$.Link}}/u2f/delete" data-id="{{.ID}}">
{{$.i18n.Tr "settings.delete_key"}}
</button>
</div>
<div class="content">
<strong>{{.Name}}</strong>
</div>
</div>
{{end}}
</div>
<div class="ui form">
{{.CsrfTokenHtml}}
<div class="required field">
<label for="nickname">{{.i18n.Tr "settings.u2f_nickname"}}</label>
<input id="nickname" name="nickname" type="text" required>
</div>
<button id="register-security-key" class="positive ui labeled icon button"><i class="usb icon"></i>{{.i18n.Tr "settings.u2f_register_key"}}</button>
</div>
{{else}}
<b>{{.i18n.Tr "settings.u2f_require_twofa"}}</b>
{{end}}
</div>
<div class="ui small modal" id="register-device">
<div class="header">{{.i18n.Tr "settings.u2f_register_key"}}</div>
<div class="content">
<i class="notched spinner loading icon"></i> {{.i18n.Tr "settings.u2f_press_button"}}
</div>
<div class="actions">
<div class="ui cancel button">{{.i18n.Tr "cancel"}}</div>
</div>
</div>
{{template "user/auth/u2f_error" .}}
<div class="ui small basic delete modal" id="delete-registration">
<div class="ui icon header">
<i class="trash icon"></i>
{{.i18n.Tr "settings.u2f_delete_key"}}
</div>
<div class="content">
<p>{{.i18n.Tr "settings.u2f_delete_key_desc"}}</p>
</div>
{{template "base/delete_modal_actions" .}}
</div>

10
vendor/github.com/RoaringBitmap/roaring/AUTHORS generated vendored Normal file
View File

@@ -0,0 +1,10 @@
# This is the official list of roaring authors for copyright purposes.
Todd Gruben (@tgruben),
Daniel Lemire (@lemire),
Elliot Murphy (@statik),
Bob Potter (@bpot),
Tyson Maly (@tvmaly),
Will Glynn (@willglynn),
Brent Pedersen (@brentp)
Maciej Biłas (@maciej)

12
vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS generated vendored Normal file
View File

@@ -0,0 +1,12 @@
# This is the official list of roaring contributors
Todd Gruben (@tgruben),
Daniel Lemire (@lemire),
Elliot Murphy (@statik),
Bob Potter (@bpot),
Tyson Maly (@tvmaly),
Will Glynn (@willglynn),
Brent Pedersen (@brentp),
Jason E. Aten (@glycerine),
Vali Malinoiu (@0x4139),
Forud Ghafouri (@fzerorubigd)

202
vendor/github.com/RoaringBitmap/roaring/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 by the authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

202
vendor/github.com/RoaringBitmap/roaring/LICENSE-2.0.txt generated vendored Normal file
View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 by the authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

121
vendor/github.com/RoaringBitmap/roaring/Makefile generated vendored Normal file
View File

@@ -0,0 +1,121 @@
.PHONY: help all test format fmtcheck vet lint qa deps clean nuke rle backrle ser fetch-real-roaring-datasets
# Display general help about this command
help:
@echo ""
@echo "The following commands are available:"
@echo ""
@echo " make qa : Run all the tests"
@echo " make test : Run the unit tests"
@echo ""
@echo " make format : Format the source code"
@echo " make fmtcheck : Check if the source code has been formatted"
@echo " make vet : Check for suspicious constructs"
@echo " make lint : Check for style errors"
@echo ""
@echo " make deps : Get the dependencies"
@echo " make clean : Remove any build artifact"
@echo " make nuke : Deletes any intermediate file"
@echo ""
@echo " make fuzz-smat : Fuzzy testing with smat"
@echo " make fuzz-stream : Fuzzy testing with stream deserialization"
@echo " make fuzz-buffer : Fuzzy testing with buffer deserialization"
@echo ""
# Alias for help target
all: help
test:
go test
go test -race -run TestConcurrent*
# Format the source code
format:
@find ./ -type f -name "*.go" -exec gofmt -w {} \;
# Check if the source code has been formatted
fmtcheck:
@mkdir -p target
@find ./ -type f -name "*.go" -exec gofmt -d {} \; | tee target/format.diff
@test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; }
# Check for syntax errors
vet:
GOPATH=$(GOPATH) go vet ./...
# Check for style errors
lint:
GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint ./...
# Alias to run all quality-assurance checks
qa: fmtcheck test vet lint
# --- INSTALL ---
# Get the dependencies
deps:
GOPATH=$(GOPATH) go get github.com/smartystreets/goconvey/convey
GOPATH=$(GOPATH) go get github.com/willf/bitset
GOPATH=$(GOPATH) go get github.com/golang/lint/golint
GOPATH=$(GOPATH) go get github.com/mschoch/smat
GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz
GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz-build
GOPATH=$(GOPATH) go get github.com/glycerine/go-unsnap-stream
GOPATH=$(GOPATH) go get github.com/philhofer/fwd
GOPATH=$(GOPATH) go get github.com/jtolds/gls
fuzz-smat:
go test -tags=gofuzz -run=TestGenerateSmatCorpus
go-fuzz-build -func FuzzSmat github.com/RoaringBitmap/roaring
go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
fuzz-stream:
go-fuzz-build -func FuzzSerializationStream github.com/RoaringBitmap/roaring
go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
fuzz-buffer:
go-fuzz-build -func FuzzSerializationBuffer github.com/RoaringBitmap/roaring
go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
# Remove any build artifact
clean:
GOPATH=$(GOPATH) go clean ./...
# Deletes any intermediate file
nuke:
rm -rf ./target
GOPATH=$(GOPATH) go clean -i ./...
rle:
cp rle.go rle16.go
perl -pi -e 's/32/16/g' rle16.go
cp rle_test.go rle16_test.go
perl -pi -e 's/32/16/g' rle16_test.go
backrle:
cp rle16.go rle.go
perl -pi -e 's/16/32/g' rle.go
perl -pi -e 's/2032/2016/g' rle.go
ser: rle
go generate
cover:
go test -coverprofile=coverage.out
go tool cover -html=coverage.out
fetch-real-roaring-datasets:
# pull github.com/RoaringBitmap/real-roaring-datasets -> testdata/real-roaring-datasets
git submodule init
git submodule update

246
vendor/github.com/RoaringBitmap/roaring/README.md generated vendored Normal file
View File

@@ -0,0 +1,246 @@
roaring [![Build Status](https://travis-ci.org/RoaringBitmap/roaring.png)](https://travis-ci.org/RoaringBitmap/roaring) [![Coverage Status](https://coveralls.io/repos/github/RoaringBitmap/roaring/badge.svg?branch=master)](https://coveralls.io/github/RoaringBitmap/roaring?branch=master) [![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring) [![Go Report Card](https://goreportcard.com/badge/RoaringBitmap/roaring)](https://goreportcard.com/report/github.com/RoaringBitmap/roaring)
=============
This is a go version of the Roaring bitmap data structure.
Roaring bitmaps are used by several major systems such as [Apache Lucene][lucene] and derivative systems such as [Solr][solr] and
[Elasticsearch][elasticsearch], [Metamarkets' Druid][druid], [LinkedIn Pinot][pinot], [Netflix Atlas][atlas], [Apache Spark][spark], [OpenSearchServer][opensearchserver], [Cloud Torrent][cloudtorrent], [Whoosh][whoosh], [Pilosa][pilosa], [Microsoft Visual Studio Team Services (VSTS)][vsts], and eBay's [Apache Kylin][kylin].
[lucene]: https://lucene.apache.org/
[solr]: https://lucene.apache.org/solr/
[elasticsearch]: https://www.elastic.co/products/elasticsearch
[druid]: http://druid.io/
[spark]: https://spark.apache.org/
[opensearchserver]: http://www.opensearchserver.com
[cloudtorrent]: https://github.com/jpillora/cloud-torrent
[whoosh]: https://bitbucket.org/mchaput/whoosh/wiki/Home
[pilosa]: https://www.pilosa.com/
[kylin]: http://kylin.apache.org/
[pinot]: http://github.com/linkedin/pinot/wiki
[vsts]: https://www.visualstudio.com/team-services/
[atlas]: https://github.com/Netflix/atlas
Roaring bitmaps are found to work well in many important applications:
> Use Roaring for bitmap compression whenever possible. Do not use other bitmap compression methods ([Wang et al., SIGMOD 2017](http://db.ucsd.edu/wp-content/uploads/2017/03/sidm338-wangA.pdf))
The ``roaring`` Go library is used by
* [Cloud Torrent](https://github.com/jpillora/cloud-torrent): a self-hosted remote torrent client
* [runv](https://github.com/hyperhq/runv): an Hypervisor-based runtime for the Open Containers Initiative
* [InfluxDB](https://www.influxdata.com)
* [Pilosa](https://www.pilosa.com/)
* [Bleve](http://www.blevesearch.com)
This library is used in production in several systems; it is part of the [Awesome Go collection](https://awesome-go.com).
There are also [Java](https://github.com/RoaringBitmap/RoaringBitmap) and [C/C++](https://github.com/RoaringBitmap/CRoaring) versions. The Java, C, C++ and Go versions are binary compatible: e.g., you can save bitmaps
from a Java program and load them back in Go, and vice versa. We have a [format specification](https://github.com/RoaringBitmap/RoaringFormatSpec).
This code is licensed under Apache License, Version 2.0 (ASL2.0).
Copyright 2016-... by the authors.
### References
- Daniel Lemire, Owen Kaser, Nathan Kurz, Luca Deri, Chris O'Hara, François Saint-Jacques, Gregory Ssi-Yan-Kai, Roaring Bitmaps: Implementation of an Optimized Software Library, Software: Practice and Experience 48 (4), 2018 [arXiv:1709.07821](https://arxiv.org/abs/1709.07821)
- Samy Chambi, Daniel Lemire, Owen Kaser, Robert Godin,
Better bitmap performance with Roaring bitmaps,
Software: Practice and Experience 46 (5), 2016.
http://arxiv.org/abs/1402.6407 This paper used data from http://lemire.me/data/realroaring2014.html
- Daniel Lemire, Gregory Ssi-Yan-Kai, Owen Kaser, Consistently faster and smaller compressed bitmaps with Roaring, Software: Practice and Experience 46 (11), 2016. http://arxiv.org/abs/1603.06549
### Dependencies
Dependencies are fetched automatically by giving the `-t` flag to `go get`.
They include:
- github.com/smartystreets/goconvey/convey
- github.com/willf/bitset
- github.com/mschoch/smat
- github.com/glycerine/go-unsnap-stream
- github.com/philhofer/fwd
- github.com/jtolds/gls
Note that the smat library requires Go 1.6 or better.
#### Installation
- go get -t github.com/RoaringBitmap/roaring
### Example
Here is a simplified but complete example:
```go
package main
import (
"fmt"
"github.com/RoaringBitmap/roaring"
"bytes"
)
func main() {
// example inspired by https://github.com/fzandona/goroar
fmt.Println("==roaring==")
rb1 := roaring.BitmapOf(1, 2, 3, 4, 5, 100, 1000)
fmt.Println(rb1.String())
rb2 := roaring.BitmapOf(3, 4, 1000)
fmt.Println(rb2.String())
rb3 := roaring.New()
fmt.Println(rb3.String())
fmt.Println("Cardinality: ", rb1.GetCardinality())
fmt.Println("Contains 3? ", rb1.Contains(3))
rb1.And(rb2)
rb3.Add(1)
rb3.Add(5)
rb3.Or(rb1)
// computes union of the three bitmaps in parallel using 4 workers
roaring.ParOr(4, rb1, rb2, rb3)
// computes intersection of the three bitmaps in parallel using 4 workers
roaring.ParAnd(4, rb1, rb2, rb3)
// prints 1, 3, 4, 5, 1000
i := rb3.Iterator()
for i.HasNext() {
fmt.Println(i.Next())
}
fmt.Println()
// next we include an example of serialization
buf := new(bytes.Buffer)
rb1.WriteTo(buf) // we omit error handling
newrb:= roaring.New()
newrb.ReadFrom(buf)
if rb1.Equals(newrb) {
fmt.Println("I wrote the content to a byte stream and read it back.")
}
}
```
If you wish to use serialization and handle errors, you might want to
consider the following sample of code:
```go
rb := BitmapOf(1, 2, 3, 4, 5, 100, 1000)
buf := new(bytes.Buffer)
size,err:=rb.WriteTo(buf)
if err != nil {
t.Errorf("Failed writing")
}
newrb:= New()
size,err=newrb.ReadFrom(buf)
if err != nil {
t.Errorf("Failed reading")
}
if ! rb.Equals(newrb) {
t.Errorf("Cannot retrieve serialized version")
}
```
Given N integers in [0,x), then the serialized size in bytes of
a Roaring bitmap should never exceed this bound:
`` 8 + 9 * ((long)x+65535)/65536 + 2 * N ``
That is, given a fixed overhead for the universe size (x), Roaring
bitmaps never use more than 2 bytes per integer. You can call
``BoundSerializedSizeInBytes`` for a more precise estimate.
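As a rough, back-of-the-envelope illustration of that bound (a minimal sketch with made-up numbers; ``BoundSerializedSizeInBytes`` remains the precise way to ask the library):
```go
package main

import "fmt"

func main() {
	// N = 10000 integers drawn from the universe [0, 1000000).
	var x, n int64 = 1000000, 10000
	bound := 8 + 9*((x+65535)/65536) + 2*n
	fmt.Printf("serialized size is at most %d bytes (%.2f bytes per integer)\n",
		bound, float64(bound)/float64(n))
}
```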
### Documentation
Current documentation is available at http://godoc.org/github.com/RoaringBitmap/roaring
### Goroutine safety
In general, it should not generally be considered safe to access
the same bitmaps using different goroutines--they are left
unsynchronized for performance. Should you want to access
a Bitmap from more than one goroutine, you should
provide synchronization. Typically this is done by using channels to pass
the *Bitmap around (in Go style; so there is only ever one owner),
or by using `sync.Mutex` to serialize operations on Bitmaps.
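If a bitmap really must be shared, one common pattern (sketched below; `lockedBitmap` is a hypothetical wrapper, not part of the library) is to funnel every operation through a `sync.Mutex`:
```go
package main

import (
	"sync"

	"github.com/RoaringBitmap/roaring"
)

// lockedBitmap is a hypothetical wrapper: every access goes through the mutex.
type lockedBitmap struct {
	mu sync.Mutex
	bm *roaring.Bitmap
}

func (l *lockedBitmap) Add(x uint32) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.bm.Add(x)
}

func (l *lockedBitmap) Contains(x uint32) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.bm.Contains(x)
}

func main() {
	lb := &lockedBitmap{bm: roaring.New()}
	var wg sync.WaitGroup
	for i := uint32(0); i < 4; i++ {
		wg.Add(1)
		go func(v uint32) {
			defer wg.Done()
			lb.Add(v) // safe: serialized by the mutex
		}(i)
	}
	wg.Wait()
	_ = lb.Contains(2)
}
```
Channel ownership (one goroutine owning the *Bitmap and receiving commands) works just as well and is the style the paragraph above alludes to.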
### Coverage
We test our software. For a report on our test coverage, see
https://coveralls.io/github/RoaringBitmap/roaring?branch=master
### Benchmark
Type
go test -bench Benchmark -run -
To run benchmarks on [Real Roaring Datasets](https://github.com/RoaringBitmap/real-roaring-datasets)
run the following:
```sh
go get github.com/RoaringBitmap/real-roaring-datasets
BENCH_REAL_DATA=1 go test -bench BenchmarkRealData -run -
```
### Iterative use
You can use roaring with gore:
- go get -u github.com/motemen/gore
- Make sure that ``$GOPATH/bin`` is in your ``$PATH``.
- go get github.com/RoaringBitmap/roaring
```go
$ gore
gore version 0.2.6 :help for help
gore> :import github.com/RoaringBitmap/roaring
gore> x:=roaring.New()
gore> x.Add(1)
gore> x.String()
"{1}"
```
### Fuzzy testing
You can help us test further the library with fuzzy testing:
go get github.com/dvyukov/go-fuzz/go-fuzz
go get github.com/dvyukov/go-fuzz/go-fuzz-build
go test -tags=gofuzz -run=TestGenerateSmatCorpus
go-fuzz-build github.com/RoaringBitmap/roaring
go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
Let it run, and if the # of crashers is > 0, check out the reports in
the workdir where you should be able to find the panic goroutine stack
traces.
### Alternative in Go
There is a Go version wrapping the C/C++ implementation https://github.com/RoaringBitmap/gocroaring
For an alternative implementation in Go, see https://github.com/fzandona/goroar
The two versions were written independently.
### Mailing list/discussion group
https://groups.google.com/forum/#!forum/roaring-bitmaps

View File

@@ -0,0 +1,960 @@
package roaring
import (
"fmt"
)
//go:generate msgp -unexported
type arrayContainer struct {
content []uint16
}
func (ac *arrayContainer) String() string {
s := "{"
for it := ac.getShortIterator(); it.hasNext(); {
s += fmt.Sprintf("%v, ", it.next())
}
return s + "}"
}
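// fillLeastSignificant16bits rebuilds full 32-bit values by OR'ing each stored low
// 16-bit value with the caller-supplied high-bit mask, writing into x starting at offset i.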
func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) {
for k := 0; k < len(ac.content); k++ {
x[k+i] = uint32(ac.content[k]) | mask
}
}
func (ac *arrayContainer) getShortIterator() shortIterable {
return &shortIterator{ac.content, 0}
}
func (ac *arrayContainer) getManyIterator() manyIterable {
return &manyIterator{ac.content, 0}
}
func (ac *arrayContainer) minimum() uint16 {
return ac.content[0] // assume not empty
}
func (ac *arrayContainer) maximum() uint16 {
return ac.content[len(ac.content)-1] // assume not empty
}
func (ac *arrayContainer) getSizeInBytes() int {
return ac.getCardinality() * 2
}
func (ac *arrayContainer) serializedSizeInBytes() int {
return ac.getCardinality() * 2
}
func arrayContainerSizeInBytes(card int) int {
return card * 2
}
// add the values in the range [firstOfRange,endx)
func (ac *arrayContainer) iaddRange(firstOfRange, endx int) container {
if firstOfRange >= endx {
return ac
}
indexstart := binarySearch(ac.content, uint16(firstOfRange))
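// binarySearch returns the index of the value when it is present, otherwise
// -(insertionPoint)-1; the negative case is folded back to the insertion point below.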
if indexstart < 0 {
indexstart = -indexstart - 1
}
indexend := binarySearch(ac.content, uint16(endx-1))
if indexend < 0 {
indexend = -indexend - 1
} else {
indexend++
}
rangelength := endx - firstOfRange
newcardinality := indexstart + (ac.getCardinality() - indexend) + rangelength
if newcardinality > arrayDefaultMaxSize {
a := ac.toBitmapContainer()
return a.iaddRange(firstOfRange, endx)
}
if cap(ac.content) < newcardinality {
tmp := make([]uint16, newcardinality, newcardinality)
copy(tmp[:indexstart], ac.content[:indexstart])
copy(tmp[indexstart+rangelength:], ac.content[indexend:])
ac.content = tmp
} else {
ac.content = ac.content[:newcardinality]
copy(ac.content[indexstart+rangelength:], ac.content[indexend:])
}
for k := 0; k < rangelength; k++ {
ac.content[k+indexstart] = uint16(firstOfRange + k)
}
return ac
}
// remove the values in the range [firstOfRange,endx)
func (ac *arrayContainer) iremoveRange(firstOfRange, endx int) container {
if firstOfRange >= endx {
return ac
}
indexstart := binarySearch(ac.content, uint16(firstOfRange))
if indexstart < 0 {
indexstart = -indexstart - 1
}
indexend := binarySearch(ac.content, uint16(endx-1))
if indexend < 0 {
indexend = -indexend - 1
} else {
indexend++
}
rangelength := indexend - indexstart
answer := ac
copy(answer.content[indexstart:], ac.content[indexstart+rangelength:])
answer.content = answer.content[:ac.getCardinality()-rangelength]
return answer
}
// flip the values in the range [firstOfRange,endx)
func (ac *arrayContainer) not(firstOfRange, endx int) container {
if firstOfRange >= endx {
//p("arrayContainer.not(): exiting early with ac.clone()")
return ac.clone()
}
return ac.notClose(firstOfRange, endx-1) // remove everything in [firstOfRange,endx-1]
}
// flip the values in the range [firstOfRange,lastOfRange]
func (ac *arrayContainer) notClose(firstOfRange, lastOfRange int) container {
if firstOfRange > lastOfRange { // unlike add and remove, not uses an inclusive range [firstOfRange,lastOfRange]
//p("arrayContainer.notClose(): exiting early with ac.clone()")
return ac.clone()
}
// determine the span of array indices to be affected
startIndex := binarySearch(ac.content, uint16(firstOfRange))
//p("startIndex=%v", startIndex)
if startIndex < 0 {
startIndex = -startIndex - 1
}
lastIndex := binarySearch(ac.content, uint16(lastOfRange))
//p("lastIndex=%v", lastIndex)
if lastIndex < 0 {
lastIndex = -lastIndex - 2
}
currentValuesInRange := lastIndex - startIndex + 1
spanToBeFlipped := lastOfRange - firstOfRange + 1
newValuesInRange := spanToBeFlipped - currentValuesInRange
cardinalityChange := newValuesInRange - currentValuesInRange
newCardinality := len(ac.content) + cardinalityChange
//p("new card is %v", newCardinality)
if newCardinality > arrayDefaultMaxSize {
//p("new card over arrayDefaultMaxSize, so returning bitmap")
return ac.toBitmapContainer().not(firstOfRange, lastOfRange+1)
}
answer := newArrayContainer()
answer.content = make([]uint16, newCardinality, newCardinality) //a hack for sure
copy(answer.content, ac.content[:startIndex])
outPos := startIndex
inPos := startIndex
valInRange := firstOfRange
for ; valInRange <= lastOfRange && inPos <= lastIndex; valInRange++ {
if uint16(valInRange) != ac.content[inPos] {
answer.content[outPos] = uint16(valInRange)
outPos++
} else {
inPos++
}
}
for ; valInRange <= lastOfRange; valInRange++ {
answer.content[outPos] = uint16(valInRange)
outPos++
}
for i := lastIndex + 1; i < len(ac.content); i++ {
answer.content[outPos] = ac.content[i]
outPos++
}
answer.content = answer.content[:newCardinality]
return answer
}
func (ac *arrayContainer) equals(o container) bool {
srb, ok := o.(*arrayContainer)
if ok {
// Check if the containers are the same object.
if ac == srb {
return true
}
if len(srb.content) != len(ac.content) {
return false
}
for i, v := range ac.content {
if v != srb.content[i] {
return false
}
}
return true
}
// use generic comparison
bCard := o.getCardinality()
aCard := ac.getCardinality()
if bCard != aCard {
return false
}
ait := ac.getShortIterator()
bit := o.getShortIterator()
for ait.hasNext() {
if bit.next() != ait.next() {
return false
}
}
return true
}
func (ac *arrayContainer) toBitmapContainer() *bitmapContainer {
bc := newBitmapContainer()
bc.loadData(ac)
return bc
}
func (ac *arrayContainer) iadd(x uint16) (wasNew bool) {
// Special case adding to the end of the container.
l := len(ac.content)
if l > 0 && l < arrayDefaultMaxSize && ac.content[l-1] < x {
ac.content = append(ac.content, x)
return true
}
loc := binarySearch(ac.content, x)
if loc < 0 {
s := ac.content
i := -loc - 1
s = append(s, 0)
copy(s[i+1:], s[i:])
s[i] = x
ac.content = s
return true
}
return false
}
func (ac *arrayContainer) iaddReturnMinimized(x uint16) container {
// Special case adding to the end of the container.
l := len(ac.content)
if l > 0 && l < arrayDefaultMaxSize && ac.content[l-1] < x {
ac.content = append(ac.content, x)
return ac
}
loc := binarySearch(ac.content, x)
if loc < 0 {
if len(ac.content) >= arrayDefaultMaxSize {
a := ac.toBitmapContainer()
a.iadd(x)
return a
}
s := ac.content
i := -loc - 1
s = append(s, 0)
copy(s[i+1:], s[i:])
s[i] = x
ac.content = s
}
return ac
}
// iremoveReturnMinimized is allowed to change the return type to minimize storage.
func (ac *arrayContainer) iremoveReturnMinimized(x uint16) container {
ac.iremove(x)
return ac
}
func (ac *arrayContainer) iremove(x uint16) bool {
loc := binarySearch(ac.content, x)
if loc >= 0 {
s := ac.content
s = append(s[:loc], s[loc+1:]...)
ac.content = s
return true
}
return false
}
func (ac *arrayContainer) remove(x uint16) container {
out := &arrayContainer{make([]uint16, len(ac.content))}
copy(out.content, ac.content[:])
loc := binarySearch(out.content, x)
if loc >= 0 {
s := out.content
s = append(s[:loc], s[loc+1:]...)
out.content = s
}
return out
}
func (ac *arrayContainer) or(a container) container {
switch x := a.(type) {
case *arrayContainer:
return ac.orArray(x)
case *bitmapContainer:
return x.orArray(ac)
case *runContainer16:
if x.isFull() {
return x.clone()
}
return x.orArray(ac)
}
panic("unsupported container type")
}
func (ac *arrayContainer) orCardinality(a container) int {
switch x := a.(type) {
case *arrayContainer:
return ac.orArrayCardinality(x)
case *bitmapContainer:
return x.orArrayCardinality(ac)
case *runContainer16:
return x.orArrayCardinality(ac)
}
panic("unsupported container type")
}
func (ac *arrayContainer) ior(a container) container {
switch x := a.(type) {
case *arrayContainer:
return ac.iorArray(x)
case *bitmapContainer:
return a.(*bitmapContainer).orArray(ac)
//return ac.iorBitmap(x) // note: this does not make sense
case *runContainer16:
if x.isFull() {
return x.clone()
}
return ac.iorRun16(x)
}
panic("unsupported container type")
}
func (ac *arrayContainer) iorArray(value2 *arrayContainer) container {
value1 := ac
len1 := value1.getCardinality()
len2 := value2.getCardinality()
maxPossibleCardinality := len1 + len2
if maxPossibleCardinality > arrayDefaultMaxSize { // it could be a bitmap!
bc := newBitmapContainer()
for k := 0; k < len(value2.content); k++ {
v := value2.content[k]
i := uint(v) >> 6
mask := uint64(1) << (v % 64)
bc.bitmap[i] |= mask
}
for k := 0; k < len(ac.content); k++ {
v := ac.content[k]
i := uint(v) >> 6
mask := uint64(1) << (v % 64)
bc.bitmap[i] |= mask
}
bc.cardinality = int(popcntSlice(bc.bitmap))
if bc.cardinality <= arrayDefaultMaxSize {
return bc.toArrayContainer()
}
return bc
}
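// Make room for an in-place merge: value1's elements are shifted to the tail of the
// buffer so union2by2 can write the result from index 0 without clobbering unread input.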
if maxPossibleCardinality > cap(value1.content) {
newcontent := make([]uint16, 0, maxPossibleCardinality)
copy(newcontent[len2:maxPossibleCardinality], ac.content[0:len1])
ac.content = newcontent
} else {
copy(ac.content[len2:maxPossibleCardinality], ac.content[0:len1])
}
nl := union2by2(value1.content[len2:maxPossibleCardinality], value2.content, ac.content)
ac.content = ac.content[:nl] // reslice to match actual used capacity
return ac
}
// Note: such code does not make practical sense, except for lazy evaluations
func (ac *arrayContainer) iorBitmap(bc2 *bitmapContainer) container {
bc1 := ac.toBitmapContainer()
bc1.iorBitmap(bc2)
*ac = *newArrayContainerFromBitmap(bc1)
return ac
}
func (ac *arrayContainer) iorRun16(rc *runContainer16) container {
bc1 := ac.toBitmapContainer()
bc2 := rc.toBitmapContainer()
bc1.iorBitmap(bc2)
*ac = *newArrayContainerFromBitmap(bc1)
return ac
}
func (ac *arrayContainer) lazyIOR(a container) container {
switch x := a.(type) {
case *arrayContainer:
return ac.lazyIorArray(x)
case *bitmapContainer:
return ac.lazyIorBitmap(x)
case *runContainer16:
if x.isFull() {
return x.clone()
}
return ac.lazyIorRun16(x)
}
panic("unsupported container type")
}
func (ac *arrayContainer) lazyIorArray(ac2 *arrayContainer) container {
// TODO actually make this lazy
return ac.iorArray(ac2)
}
func (ac *arrayContainer) lazyIorBitmap(bc *bitmapContainer) container {
// TODO actually make this lazy
return ac.iorBitmap(bc)
}
func (ac *arrayContainer) lazyIorRun16(rc *runContainer16) container {
// TODO actually make this lazy
return ac.iorRun16(rc)
}
func (ac *arrayContainer) lazyOR(a container) container {
switch x := a.(type) {
case *arrayContainer:
return ac.lazyorArray(x)
case *bitmapContainer:
return a.lazyOR(ac)
case *runContainer16:
if x.isFull() {
return x.clone()
}
return x.orArray(ac)
}
panic("unsupported container type")
}
func (ac *arrayContainer) orArray(value2 *arrayContainer) container {
value1 := ac
maxPossibleCardinality := value1.getCardinality() + value2.getCardinality()
if maxPossibleCardinality > arrayDefaultMaxSize { // it could be a bitmap!
bc := newBitmapContainer()
for k := 0; k < len(value2.content); k++ {
v := value2.content[k]
i := uint(v) >> 6
mask := uint64(1) << (v % 64)
bc.bitmap[i] |= mask
}
for k := 0; k < len(ac.content); k++ {
v := ac.content[k]
i := uint(v) >> 6
mask := uint64(1) << (v % 64)
bc.bitmap[i] |= mask
}
bc.cardinality = int(popcntSlice(bc.bitmap))
if bc.cardinality <= arrayDefaultMaxSize {
return bc.toArrayContainer()
}
return bc
}
answer := newArrayContainerCapacity(maxPossibleCardinality)
nl := union2by2(value1.content, value2.content, answer.content)
answer.content = answer.content[:nl] // reslice to match actual used capacity
return answer
}
func (ac *arrayContainer) orArrayCardinality(value2 *arrayContainer) int {
return union2by2Cardinality(ac.content, value2.content)
}
func (ac *arrayContainer) lazyorArray(value2 *arrayContainer) container {
value1 := ac
maxPossibleCardinality := value1.getCardinality() + value2.getCardinality()
if maxPossibleCardinality > arrayLazyLowerBound { // it could be a bitmap!
bc := newBitmapContainer()
for k := 0; k < len(value2.content); k++ {
v := value2.content[k]
i := uint(v) >> 6
mask := uint64(1) << (v % 64)
bc.bitmap[i] |= mask
}
for k := 0; k < len(ac.content); k++ {
v := ac.content[k]
i := uint(v) >> 6
mask := uint64(1) << (v % 64)
bc.bitmap[i] |= mask
}
bc.cardinality = invalidCardinality
return bc
}
answer := newArrayContainerCapacity(maxPossibleCardinality)
nl := union2by2(value1.content, value2.content, answer.content)
answer.content = answer.content[:nl] // reslice to match actual used capacity
return answer
}
func (ac *arrayContainer) and(a container) container {
//p("ac.and() called")
switch x := a.(type) {
case *arrayContainer:
return ac.andArray(x)
case *bitmapContainer:
return x.and(ac)
case *runContainer16:
if x.isFull() {
return ac.clone()
}
return x.andArray(ac)
}
panic("unsupported container type")
}
func (ac *arrayContainer) andCardinality(a container) int {
switch x := a.(type) {
case *arrayContainer:
return ac.andArrayCardinality(x)
case *bitmapContainer:
return x.andCardinality(ac)
case *runContainer16:
return x.andArrayCardinality(ac)
}
panic("unsupported container type")
}
func (ac *arrayContainer) intersects(a container) bool {
switch x := a.(type) {
case *arrayContainer:
return ac.intersectsArray(x)
case *bitmapContainer:
return x.intersects(ac)
case *runContainer16:
return x.intersects(ac)
}
panic("unsupported container type")
}
func (ac *arrayContainer) iand(a container) container {
switch x := a.(type) {
case *arrayContainer:
return ac.iandArray(x)
case *bitmapContainer:
return ac.iandBitmap(x)
case *runContainer16:
if x.isFull() {
return ac.clone()
}
return x.andArray(ac)
}
panic("unsupported container type")
}
func (ac *arrayContainer) iandBitmap(bc *bitmapContainer) container {
pos := 0
c := ac.getCardinality()
for k := 0; k < c; k++ {
// branchless
v := ac.content[k]
ac.content[pos] = v
pos += int(bc.bitValue(v))
}
ac.content = ac.content[:pos]
return ac
}
func (ac *arrayContainer) xor(a container) container {
switch x := a.(type) {
case *arrayContainer:
return ac.xorArray(x)
case *bitmapContainer:
return a.xor(ac)
case *runContainer16:
return x.xorArray(ac)
}
panic("unsupported container type")
}
func (ac *arrayContainer) xorArray(value2 *arrayContainer) container {
value1 := ac
totalCardinality := value1.getCardinality() + value2.getCardinality()
if totalCardinality > arrayDefaultMaxSize { // it could be a bitmap!
bc := newBitmapContainer()
for k := 0; k < len(value2.content); k++ {
v := value2.content[k]
i := uint(v) >> 6
bc.bitmap[i] ^= (uint64(1) << (v % 64))
}
for k := 0; k < len(ac.content); k++ {
v := ac.content[k]
i := uint(v) >> 6
bc.bitmap[i] ^= (uint64(1) << (v % 64))
}
bc.computeCardinality()
if bc.cardinality <= arrayDefaultMaxSize {
return bc.toArrayContainer()
}
return bc
}
desiredCapacity := totalCardinality
answer := newArrayContainerCapacity(desiredCapacity)
length := exclusiveUnion2by2(value1.content, value2.content, answer.content)
answer.content = answer.content[:length]
return answer
}
func (ac *arrayContainer) andNot(a container) container {
switch x := a.(type) {
case *arrayContainer:
return ac.andNotArray(x)
case *bitmapContainer:
return ac.andNotBitmap(x)
case *runContainer16:
return ac.andNotRun16(x)
}
panic("unsupported container type")
}
func (ac *arrayContainer) andNotRun16(rc *runContainer16) container {
acb := ac.toBitmapContainer()
rcb := rc.toBitmapContainer()
return acb.andNotBitmap(rcb)
}
func (ac *arrayContainer) iandNot(a container) container {
switch x := a.(type) {
case *arrayContainer:
return ac.iandNotArray(x)
case *bitmapContainer:
return ac.iandNotBitmap(x)
case *runContainer16:
return ac.iandNotRun16(x)
}
panic("unsupported container type")
}
func (ac *arrayContainer) iandNotRun16(rc *runContainer16) container {
rcb := rc.toBitmapContainer()
acb := ac.toBitmapContainer()
acb.iandNotBitmapSurely(rcb)
*ac = *(acb.toArrayContainer())
return ac
}
func (ac *arrayContainer) andNotArray(value2 *arrayContainer) container {
value1 := ac
desiredcapacity := value1.getCardinality()
answer := newArrayContainerCapacity(desiredcapacity)
length := difference(value1.content, value2.content, answer.content)
answer.content = answer.content[:length]
return answer
}
func (ac *arrayContainer) iandNotArray(value2 *arrayContainer) container {
length := difference(ac.content, value2.content, ac.content)
ac.content = ac.content[:length]
return ac
}
func (ac *arrayContainer) andNotBitmap(value2 *bitmapContainer) container {
desiredcapacity := ac.getCardinality()
answer := newArrayContainerCapacity(desiredcapacity)
answer.content = answer.content[:desiredcapacity]
pos := 0
for _, v := range ac.content {
answer.content[pos] = v
pos += 1 - int(value2.bitValue(v))
}
answer.content = answer.content[:pos]
return answer
}
func (ac *arrayContainer) andBitmap(value2 *bitmapContainer) container {
desiredcapacity := ac.getCardinality()
answer := newArrayContainerCapacity(desiredcapacity)
answer.content = answer.content[:desiredcapacity]
pos := 0
for _, v := range ac.content {
answer.content[pos] = v
pos += int(value2.bitValue(v))
}
answer.content = answer.content[:pos]
return answer
}
func (ac *arrayContainer) iandNotBitmap(value2 *bitmapContainer) container {
pos := 0
for _, v := range ac.content {
ac.content[pos] = v
pos += 1 - int(value2.bitValue(v))
}
ac.content = ac.content[:pos]
return ac
}
func copyOf(array []uint16, size int) []uint16 {
result := make([]uint16, size)
for i, x := range array {
if i == size {
break
}
result[i] = x
}
return result
}
// flip the values in the range [firstOfRange,endx)
func (ac *arrayContainer) inot(firstOfRange, endx int) container {
if firstOfRange >= endx {
return ac
}
return ac.inotClose(firstOfRange, endx-1) // remove everything in [firstOfRange,endx-1]
}
// flip the values in the range [firstOfRange,lastOfRange]
func (ac *arrayContainer) inotClose(firstOfRange, lastOfRange int) container {
//p("ac.inotClose() starting")
if firstOfRange > lastOfRange { // unlike add and remove, not uses an inclusive range [firstOfRange,lastOfRange]
return ac
}
// determine the span of array indices to be affected
startIndex := binarySearch(ac.content, uint16(firstOfRange))
if startIndex < 0 {
startIndex = -startIndex - 1
}
lastIndex := binarySearch(ac.content, uint16(lastOfRange))
if lastIndex < 0 {
lastIndex = -lastIndex - 1 - 1
}
currentValuesInRange := lastIndex - startIndex + 1
spanToBeFlipped := lastOfRange - firstOfRange + 1
newValuesInRange := spanToBeFlipped - currentValuesInRange
buffer := make([]uint16, newValuesInRange)
cardinalityChange := newValuesInRange - currentValuesInRange
newCardinality := len(ac.content) + cardinalityChange
if cardinalityChange > 0 {
if newCardinality > len(ac.content) {
if newCardinality > arrayDefaultMaxSize {
//p("ac.inotClose() converting to bitmap and doing inot there")
bcRet := ac.toBitmapContainer()
bcRet.inot(firstOfRange, lastOfRange+1)
*ac = *bcRet.toArrayContainer()
return bcRet
}
ac.content = copyOf(ac.content, newCardinality)
}
base := lastIndex + 1
copy(ac.content[lastIndex+1+cardinalityChange:], ac.content[base:base+len(ac.content)-1-lastIndex])
ac.negateRange(buffer, startIndex, lastIndex, firstOfRange, lastOfRange+1)
} else { // no expansion needed
ac.negateRange(buffer, startIndex, lastIndex, firstOfRange, lastOfRange+1)
if cardinalityChange < 0 {
for i := startIndex + newValuesInRange; i < newCardinality; i++ {
ac.content[i] = ac.content[i-cardinalityChange]
}
}
}
ac.content = ac.content[:newCardinality]
//p("bottom of ac.inotClose(): returning ac")
return ac
}
func (ac *arrayContainer) negateRange(buffer []uint16, startIndex, lastIndex, startRange, lastRange int) {
// compute the negation into buffer
outPos := 0
inPos := startIndex // value here always >= valInRange,
// until it is exhausted
// n.b., we can start initially exhausted.
valInRange := startRange
for ; valInRange < lastRange && inPos <= lastIndex; valInRange++ {
if uint16(valInRange) != ac.content[inPos] {
buffer[outPos] = uint16(valInRange)
outPos++
} else {
inPos++
}
}
// if there are extra items (greater than the biggest
// pre-existing one in range), buffer them
for ; valInRange < lastRange; valInRange++ {
buffer[outPos] = uint16(valInRange)
outPos++
}
if outPos != len(buffer) {
panic("negateRange: internal bug")
}
for i, item := range buffer {
ac.content[i+startIndex] = item
}
}
func (ac *arrayContainer) isFull() bool {
return false
}
func (ac *arrayContainer) andArray(value2 *arrayContainer) container {
desiredcapacity := minOfInt(ac.getCardinality(), value2.getCardinality())
answer := newArrayContainerCapacity(desiredcapacity)
length := intersection2by2(
ac.content,
value2.content,
answer.content)
answer.content = answer.content[:length]
return answer
}
func (ac *arrayContainer) andArrayCardinality(value2 *arrayContainer) int {
return intersection2by2Cardinality(
ac.content,
value2.content)
}
func (ac *arrayContainer) intersectsArray(value2 *arrayContainer) bool {
return intersects2by2(
ac.content,
value2.content)
}
func (ac *arrayContainer) iandArray(value2 *arrayContainer) container {
length := intersection2by2(
ac.content,
value2.content,
ac.content)
ac.content = ac.content[:length]
return ac
}
func (ac *arrayContainer) getCardinality() int {
return len(ac.content)
}
func (ac *arrayContainer) rank(x uint16) int {
answer := binarySearch(ac.content, x)
if answer >= 0 {
return answer + 1
}
return -answer - 1
}
func (ac *arrayContainer) selectInt(x uint16) int {
return int(ac.content[x])
}
func (ac *arrayContainer) clone() container {
ptr := arrayContainer{make([]uint16, len(ac.content))}
copy(ptr.content, ac.content[:])
return &ptr
}
func (ac *arrayContainer) contains(x uint16) bool {
return binarySearch(ac.content, x) >= 0
}
func (ac *arrayContainer) loadData(bitmapContainer *bitmapContainer) {
ac.content = make([]uint16, bitmapContainer.cardinality, bitmapContainer.cardinality)
bitmapContainer.fillArray(ac.content)
}
func newArrayContainer() *arrayContainer {
p := new(arrayContainer)
return p
}
func newArrayContainerFromBitmap(bc *bitmapContainer) *arrayContainer {
ac := &arrayContainer{}
ac.loadData(bc)
return ac
}
func newArrayContainerCapacity(size int) *arrayContainer {
p := new(arrayContainer)
p.content = make([]uint16, 0, size)
return p
}
func newArrayContainerSize(size int) *arrayContainer {
p := new(arrayContainer)
p.content = make([]uint16, size, size)
return p
}
func newArrayContainerRange(firstOfRun, lastOfRun int) *arrayContainer {
valuesInRange := lastOfRun - firstOfRun + 1
this := newArrayContainerCapacity(valuesInRange)
for i := 0; i < valuesInRange; i++ {
this.content = append(this.content, uint16(firstOfRun+i))
}
return this
}
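// numberOfRuns counts maximal runs of consecutive values in the sorted, deduplicated
// content; toEfficientContainer uses it to size an equivalent run container.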
func (ac *arrayContainer) numberOfRuns() (nr int) {
n := len(ac.content)
var runlen uint16
var cur, prev uint16
switch n {
case 0:
return 0
case 1:
return 1
default:
for i := 1; i < n; i++ {
prev = ac.content[i-1]
cur = ac.content[i]
if cur == prev+1 {
runlen++
} else {
if cur < prev {
panic("then fundamental arrayContainer assumption of sorted ac.content was broken")
}
if cur == prev {
panic("then fundamental arrayContainer assumption of deduplicated content was broken")
} else {
nr++
runlen = 0
}
}
}
nr++
}
return
}
// convert to run or array *if needed*
func (ac *arrayContainer) toEfficientContainer() container {
numRuns := ac.numberOfRuns()
sizeAsRunContainer := runContainer16SerializedSizeInBytes(numRuns)
sizeAsBitmapContainer := bitmapContainerSizeInBytes()
card := ac.getCardinality()
sizeAsArrayContainer := arrayContainerSizeInBytes(card)
if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) {
return newRunContainer16FromArray(ac)
}
if card <= arrayDefaultMaxSize {
return ac
}
return ac.toBitmapContainer()
}
func (ac *arrayContainer) containerType() contype {
return arrayContype
}
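// Illustrative sketch (not part of the vendored file): toEfficientContainer
// above picks whichever representation serializes smallest, so an
// arrayContainer holding one long run of consecutive values should come back
// as a runContainer16. A minimal in-package check, assuming the helpers
// defined in this package:
func exampleArrayToEfficientContainer() {
	ac := newArrayContainerRange(0, 1000) // values 0..1000 form a single run
	if _, isRun := ac.toEfficientContainer().(*runContainer16); !isRun {
		panic("expected a single long run to become a runContainer16")
	}
}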


@ -0,0 +1,134 @@
package roaring
// NOTE: THIS FILE WAS PRODUCED BY THE
// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
// DO NOT EDIT
import "github.com/tinylib/msgp/msgp"
// DecodeMsg implements msgp.Decodable
func (z *arrayContainer) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zbzg uint32
zbzg, err = dc.ReadMapHeader()
if err != nil {
return
}
for zbzg > 0 {
zbzg--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "content":
var zbai uint32
zbai, err = dc.ReadArrayHeader()
if err != nil {
return
}
if cap(z.content) >= int(zbai) {
z.content = (z.content)[:zbai]
} else {
z.content = make([]uint16, zbai)
}
for zxvk := range z.content {
z.content[zxvk], err = dc.ReadUint16()
if err != nil {
return
}
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *arrayContainer) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "content"
err = en.Append(0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74)
if err != nil {
return err
}
err = en.WriteArrayHeader(uint32(len(z.content)))
if err != nil {
return
}
for zxvk := range z.content {
err = en.WriteUint16(z.content[zxvk])
if err != nil {
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *arrayContainer) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "content"
o = append(o, 0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74)
o = msgp.AppendArrayHeader(o, uint32(len(z.content)))
for zxvk := range z.content {
o = msgp.AppendUint16(o, z.content[zxvk])
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *arrayContainer) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zcmr uint32
zcmr, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for zcmr > 0 {
zcmr--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "content":
var zajw uint32
zajw, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return
}
if cap(z.content) >= int(zajw) {
z.content = (z.content)[:zajw]
} else {
z.content = make([]uint16, zajw)
}
for zxvk := range z.content {
z.content[zxvk], bts, err = msgp.ReadUint16Bytes(bts)
if err != nil {
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *arrayContainer) Msgsize() (s int) {
s = 1 + 8 + msgp.ArrayHeaderSize + (len(z.content) * (msgp.Uint16Size))
return
}


@ -0,0 +1,982 @@
package roaring
import (
"fmt"
"unsafe"
)
//go:generate msgp -unexported
type bitmapContainer struct {
cardinality int
bitmap []uint64
}
func (bc bitmapContainer) String() string {
var s string
for it := bc.getShortIterator(); it.hasNext(); {
s += fmt.Sprintf("%v, ", it.next())
}
return s
}
func newBitmapContainer() *bitmapContainer {
p := new(bitmapContainer)
size := (1 << 16) / 64
p.bitmap = make([]uint64, size, size)
return p
}
func newBitmapContainerwithRange(firstOfRun, lastOfRun int) *bitmapContainer {
bc := newBitmapContainer()
bc.cardinality = lastOfRun - firstOfRun + 1
if bc.cardinality == maxCapacity {
fill(bc.bitmap, uint64(0xffffffffffffffff))
} else {
firstWord := firstOfRun / 64
lastWord := lastOfRun / 64
zeroPrefixLength := uint64(firstOfRun & 63)
zeroSuffixLength := uint64(63 - (lastOfRun & 63))
fillRange(bc.bitmap, firstWord, lastWord+1, uint64(0xffffffffffffffff))
bc.bitmap[firstWord] ^= ((uint64(1) << zeroPrefixLength) - 1)
blockOfOnes := (uint64(1) << zeroSuffixLength) - 1
maskOnLeft := blockOfOnes << (uint64(64) - zeroSuffixLength)
bc.bitmap[lastWord] ^= maskOnLeft
}
return bc
}
func (bc *bitmapContainer) minimum() uint16 {
for i := 0; i < len(bc.bitmap); i++ {
w := bc.bitmap[i]
if w != 0 {
r := countTrailingZeros(w)
return uint16(r + i*64)
}
}
return MaxUint16
}
// i should be non-zero
func clz(i uint64) int {
n := 1
x := uint32(i >> 32)
if x == 0 {
n += 32
x = uint32(i)
}
if x>>16 == 0 {
n += 16
x = x << 16
}
if x>>24 == 0 {
n += 8
x = x << 8
}
if x>>28 == 0 {
n += 4
x = x << 4
}
if x>>30 == 0 {
n += 2
x = x << 2
}
return n - int(x>>31)
}
func (bc *bitmapContainer) maximum() uint16 {
for i := len(bc.bitmap); i > 0; i-- {
w := bc.bitmap[i-1]
if w != 0 {
r := clz(w)
return uint16((i-1)*64 + 63 - r)
}
}
return uint16(0)
}
type bitmapContainerShortIterator struct {
ptr *bitmapContainer
i int
}
func (bcsi *bitmapContainerShortIterator) next() uint16 {
j := bcsi.i
bcsi.i = bcsi.ptr.NextSetBit(bcsi.i + 1)
return uint16(j)
}
func (bcsi *bitmapContainerShortIterator) hasNext() bool {
return bcsi.i >= 0
}
func newBitmapContainerShortIterator(a *bitmapContainer) *bitmapContainerShortIterator {
return &bitmapContainerShortIterator{a, a.NextSetBit(0)}
}
func (bc *bitmapContainer) getShortIterator() shortIterable {
return newBitmapContainerShortIterator(bc)
}
type bitmapContainerManyIterator struct {
ptr *bitmapContainer
base int
bitset uint64
}
func (bcmi *bitmapContainerManyIterator) nextMany(hs uint32, buf []uint32) int {
n := 0
base := bcmi.base
bitset := bcmi.bitset
for n < len(buf) {
if bitset == 0 {
base += 1
if base >= len(bcmi.ptr.bitmap) {
bcmi.base = base
bcmi.bitset = bitset
return n
}
bitset = bcmi.ptr.bitmap[base]
continue
}
t := bitset & -bitset
buf[n] = uint32(((base * 64) + int(popcount(t-1)))) | hs
n = n + 1
bitset ^= t
}
bcmi.base = base
bcmi.bitset = bitset
return n
}
func newBitmapContainerManyIterator(a *bitmapContainer) *bitmapContainerManyIterator {
return &bitmapContainerManyIterator{a, -1, 0}
}
func (bc *bitmapContainer) getManyIterator() manyIterable {
return newBitmapContainerManyIterator(bc)
}
func (bc *bitmapContainer) getSizeInBytes() int {
return len(bc.bitmap) * 8 // + bcBaseBytes
}
func (bc *bitmapContainer) serializedSizeInBytes() int {
//return bc.Msgsize()// NOO! This breaks GetSerializedSizeInBytes
return len(bc.bitmap) * 8
}
const bcBaseBytes = int(unsafe.Sizeof(bitmapContainer{}))
// bitmapContainer doesn't depend on card, always fully allocated
func bitmapContainerSizeInBytes() int {
return bcBaseBytes + (1<<16)/8
}
func bitmapEquals(a, b []uint64) bool {
if len(a) != len(b) {
//p("bitmaps differ on length. len(a)=%v; len(b)=%v", len(a), len(b))
return false
}
for i, v := range a {
if v != b[i] {
//p("bitmaps differ on element i=%v", i)
return false
}
}
//p("bitmapEquals returning true")
return true
}
func (bc *bitmapContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) {
// TODO: should be written as optimized assembly
pos := i
base := mask
for k := 0; k < len(bc.bitmap); k++ {
bitset := bc.bitmap[k]
for bitset != 0 {
t := bitset & -bitset
x[pos] = base + uint32(popcount(t-1))
pos++
bitset ^= t
}
base += 64
}
}
func (bc *bitmapContainer) equals(o container) bool {
srb, ok := o.(*bitmapContainer)
if ok {
//p("bitmapContainers.equals: both are bitmapContainers")
if srb.cardinality != bc.cardinality {
//p("bitmapContainers.equals: card differs: %v vs %v", srb.cardinality, bc.cardinality)
return false
}
return bitmapEquals(bc.bitmap, srb.bitmap)
}
// use generic comparison
if bc.getCardinality() != o.getCardinality() {
return false
}
ait := o.getShortIterator()
bit := bc.getShortIterator()
for ait.hasNext() {
if bit.next() != ait.next() {
return false
}
}
return true
}
func (bc *bitmapContainer) iaddReturnMinimized(i uint16) container {
bc.iadd(i)
if bc.isFull() {
return newRunContainer16Range(0, MaxUint16)
}
return bc
}
func (bc *bitmapContainer) iadd(i uint16) bool {
x := int(i)
previous := bc.bitmap[x/64]
mask := uint64(1) << (uint(x) % 64)
newb := previous | mask
bc.bitmap[x/64] = newb
bc.cardinality += int((previous ^ newb) >> (uint(x) % 64))
return newb != previous
}
func (bc *bitmapContainer) iremoveReturnMinimized(i uint16) container {
if bc.iremove(i) {
if bc.cardinality == arrayDefaultMaxSize {
return bc.toArrayContainer()
}
}
return bc
}
// iremove returns true if i was found.
func (bc *bitmapContainer) iremove(i uint16) bool {
/* branchless code
w := bc.bitmap[i>>6]
mask := uint64(1) << (i % 64)
neww := w &^ mask
bc.cardinality -= int((w ^ neww) >> (i % 64))
bc.bitmap[i>>6] = neww */
if bc.contains(i) {
bc.cardinality--
bc.bitmap[i/64] &^= (uint64(1) << (i % 64))
return true
}
return false
}
func (bc *bitmapContainer) isFull() bool {
return bc.cardinality == int(MaxUint16)+1
}
func (bc *bitmapContainer) getCardinality() int {
return bc.cardinality
}
func (bc *bitmapContainer) clone() container {
ptr := bitmapContainer{bc.cardinality, make([]uint64, len(bc.bitmap))}
copy(ptr.bitmap, bc.bitmap[:])
return &ptr
}
// add all values in range [firstOfRange,lastOfRange)
func (bc *bitmapContainer) iaddRange(firstOfRange, lastOfRange int) container {
bc.cardinality += setBitmapRangeAndCardinalityChange(bc.bitmap, firstOfRange, lastOfRange)
return bc
}
// remove all values in range [firstOfRange,lastOfRange)
func (bc *bitmapContainer) iremoveRange(firstOfRange, lastOfRange int) container {
bc.cardinality += resetBitmapRangeAndCardinalityChange(bc.bitmap, firstOfRange, lastOfRange)
if bc.getCardinality() <= arrayDefaultMaxSize {
return bc.toArrayContainer()
}
return bc
}
// flip all values in range [firstOfRange,endx)
func (bc *bitmapContainer) inot(firstOfRange, endx int) container {
p("bc.inot() called with [%v, %v)", firstOfRange, endx)
if endx-firstOfRange == maxCapacity {
//p("endx-firstOfRange == maxCapacity")
flipBitmapRange(bc.bitmap, firstOfRange, endx)
bc.cardinality = maxCapacity - bc.cardinality
//p("bc.cardinality is now %v", bc.cardinality)
} else if endx-firstOfRange > maxCapacity/2 {
//p("endx-firstOfRange > maxCapacity/2")
flipBitmapRange(bc.bitmap, firstOfRange, endx)
bc.computeCardinality()
} else {
bc.cardinality += flipBitmapRangeAndCardinalityChange(bc.bitmap, firstOfRange, endx)
}
if bc.getCardinality() <= arrayDefaultMaxSize {
return bc.toArrayContainer()
}
return bc
}
// flip all values in range [firstOfRange,endx)
func (bc *bitmapContainer) not(firstOfRange, endx int) container {
answer := bc.clone()
return answer.inot(firstOfRange, endx)
}
func (bc *bitmapContainer) or(a container) container {
switch x := a.(type) {
case *arrayContainer:
return bc.orArray(x)
case *bitmapContainer:
return bc.orBitmap(x)
case *runContainer16:
if x.isFull() {
return x.clone()
}
return x.orBitmapContainer(bc)
}
panic("unsupported container type")
}
func (bc *bitmapContainer) orCardinality(a container) int {
switch x := a.(type) {
case *arrayContainer:
return bc.orArrayCardinality(x)
case *bitmapContainer:
return bc.orBitmapCardinality(x)
case *runContainer16:
return x.orBitmapContainerCardinality(bc)
}
panic("unsupported container type")
}
func (bc *bitmapContainer) ior(a container) container {
switch x := a.(type) {
case *arrayContainer:
return bc.iorArray(x)
case *bitmapContainer:
return bc.iorBitmap(x)
case *runContainer16:
if x.isFull() {
return x.clone()
}
for i := range x.iv {
bc.iaddRange(int(x.iv[i].start), int(x.iv[i].last())+1)
}
if bc.isFull() {
return newRunContainer16Range(0, MaxUint16)
}
//bc.computeCardinality()
return bc
}
panic(fmt.Errorf("unsupported container type %T", a))
}
func (bc *bitmapContainer) lazyIOR(a container) container {
switch x := a.(type) {
case *arrayContainer:
return bc.lazyIORArray(x)
case *bitmapContainer:
return bc.lazyIORBitmap(x)
case *runContainer16:
if x.isFull() {
return x.clone()
}
// Manually inlined setBitmapRange function
bitmap := bc.bitmap
for _, iv := range x.iv {
start := int(iv.start)
end := int(iv.last()) + 1
if start >= end {
continue
}
firstword := start / 64
endword := (end - 1) / 64
if firstword == endword {
bitmap[firstword] |= (^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64))
continue
}
bitmap[firstword] |= ^uint64(0) << uint(start%64)
for i := firstword + 1; i < endword; i++ {
bitmap[i] = ^uint64(0)
}
bitmap[endword] |= ^uint64(0) >> (uint(-end) % 64)
}
bc.cardinality = invalidCardinality
return bc
}
panic("unsupported container type")
}
func (bc *bitmapContainer) lazyOR(a container) container {
switch x := a.(type) {
case *arrayContainer:
return bc.lazyORArray(x)
case *bitmapContainer:
return bc.lazyORBitmap(x)
case *runContainer16:
if x.isFull() {
return x.clone()
}
// TODO: implement lazy OR
return x.orBitmapContainer(bc)
}
panic("unsupported container type")
}
func (bc *bitmapContainer) orArray(value2 *arrayContainer) container {
answer := bc.clone().(*bitmapContainer)
c := value2.getCardinality()
for k := 0; k < c; k++ {
v := value2.content[k]
i := uint(v) >> 6
bef := answer.bitmap[i]
aft := bef | (uint64(1) << (v % 64))
answer.bitmap[i] = aft
answer.cardinality += int((bef - aft) >> 63)
}
return answer
}
func (bc *bitmapContainer) orArrayCardinality(value2 *arrayContainer) int {
answer := 0
c := value2.getCardinality()
for k := 0; k < c; k++ {
// branchless:
v := value2.content[k]
i := uint(v) >> 6
bef := bc.bitmap[i]
aft := bef | (uint64(1) << (v % 64))
answer += int((bef - aft) >> 63)
}
return answer
}
func (bc *bitmapContainer) orBitmap(value2 *bitmapContainer) container {
answer := newBitmapContainer()
for k := 0; k < len(answer.bitmap); k++ {
answer.bitmap[k] = bc.bitmap[k] | value2.bitmap[k]
}
answer.computeCardinality()
if answer.isFull() {
return newRunContainer16Range(0, MaxUint16)
}
return answer
}
func (bc *bitmapContainer) orBitmapCardinality(value2 *bitmapContainer) int {
return int(popcntOrSlice(bc.bitmap, value2.bitmap))
}
func (bc *bitmapContainer) andBitmapCardinality(value2 *bitmapContainer) int {
return int(popcntAndSlice(bc.bitmap, value2.bitmap))
}
func (bc *bitmapContainer) computeCardinality() {
bc.cardinality = int(popcntSlice(bc.bitmap))
}
func (bc *bitmapContainer) iorArray(ac *arrayContainer) container {
for k := range ac.content {
vc := ac.content[k]
i := uint(vc) >> 6
bef := bc.bitmap[i]
aft := bef | (uint64(1) << (vc % 64))
bc.bitmap[i] = aft
bc.cardinality += int((bef - aft) >> 63)
}
if bc.isFull() {
return newRunContainer16Range(0, MaxUint16)
}
return bc
}
func (bc *bitmapContainer) iorBitmap(value2 *bitmapContainer) container {
answer := bc
answer.cardinality = 0
for k := 0; k < len(answer.bitmap); k++ {
answer.bitmap[k] = bc.bitmap[k] | value2.bitmap[k]
}
answer.computeCardinality()
if bc.isFull() {
return newRunContainer16Range(0, MaxUint16)
}
return answer
}
func (bc *bitmapContainer) lazyIORArray(value2 *arrayContainer) container {
answer := bc
c := value2.getCardinality()
for k := 0; k < c; k++ {
vc := value2.content[k]
i := uint(vc) >> 6
answer.bitmap[i] = answer.bitmap[i] | (uint64(1) << (vc % 64))
}
answer.cardinality = invalidCardinality
return answer
}
func (bc *bitmapContainer) lazyORArray(value2 *arrayContainer) container {
answer := bc.clone().(*bitmapContainer)
return answer.lazyIORArray(value2)
}
func (bc *bitmapContainer) lazyIORBitmap(value2 *bitmapContainer) container {
answer := bc
for k := 0; k < len(answer.bitmap); k++ {
answer.bitmap[k] = bc.bitmap[k] | value2.bitmap[k]
}
bc.cardinality = invalidCardinality
return answer
}
func (bc *bitmapContainer) lazyORBitmap(value2 *bitmapContainer) container {
answer := bc.clone().(*bitmapContainer)
return answer.lazyIORBitmap(value2)
}
func (bc *bitmapContainer) xor(a container) container {
switch x := a.(type) {
case *arrayContainer:
return bc.xorArray(x)
case *bitmapContainer:
return bc.xorBitmap(x)
case *runContainer16:
return x.xorBitmap(bc)
}
panic("unsupported container type")
}
func (bc *bitmapContainer) xorArray(value2 *arrayContainer) container {
answer := bc.clone().(*bitmapContainer)
c := value2.getCardinality()
for k := 0; k < c; k++ {
vc := value2.content[k]
index := uint(vc) >> 6
abi := answer.bitmap[index]
mask := uint64(1) << (vc % 64)
answer.cardinality += 1 - 2*int((abi&mask)>>(vc%64))
answer.bitmap[index] = abi ^ mask
}
if answer.cardinality <= arrayDefaultMaxSize {
return answer.toArrayContainer()
}
return answer
}
func (bc *bitmapContainer) rank(x uint16) int {
// TODO: rewrite in assembly
leftover := (uint(x) + 1) & 63
if leftover == 0 {
return int(popcntSlice(bc.bitmap[:(uint(x)+1)/64]))
}
return int(popcntSlice(bc.bitmap[:(uint(x)+1)/64]) + popcount(bc.bitmap[(uint(x)+1)/64]<<(64-leftover)))
}
func (bc *bitmapContainer) selectInt(x uint16) int {
remaining := x
for k := 0; k < len(bc.bitmap); k++ {
w := popcount(bc.bitmap[k])
if uint16(w) > remaining {
return k*64 + selectBitPosition(bc.bitmap[k], int(remaining))
}
remaining -= uint16(w)
}
return -1
}
func (bc *bitmapContainer) xorBitmap(value2 *bitmapContainer) container {
newCardinality := int(popcntXorSlice(bc.bitmap, value2.bitmap))
if newCardinality > arrayDefaultMaxSize {
answer := newBitmapContainer()
for k := 0; k < len(answer.bitmap); k++ {
answer.bitmap[k] = bc.bitmap[k] ^ value2.bitmap[k]
}
answer.cardinality = newCardinality
if answer.isFull() {
return newRunContainer16Range(0, MaxUint16)
}
return answer
}
ac := newArrayContainerSize(newCardinality)
fillArrayXOR(ac.content, bc.bitmap, value2.bitmap)
ac.content = ac.content[:newCardinality]
return ac
}
func (bc *bitmapContainer) and(a container) container {
switch x := a.(type) {
case *arrayContainer:
return bc.andArray(x)
case *bitmapContainer:
return bc.andBitmap(x)
case *runContainer16:
if x.isFull() {
return bc.clone()
}
return x.andBitmapContainer(bc)
}
panic("unsupported container type")
}
func (bc *bitmapContainer) andCardinality(a container) int {
switch x := a.(type) {
case *arrayContainer:
return bc.andArrayCardinality(x)
case *bitmapContainer:
return bc.andBitmapCardinality(x)
case *runContainer16:
return x.andBitmapContainerCardinality(bc)
}
panic("unsupported container type")
}
func (bc *bitmapContainer) intersects(a container) bool {
switch x := a.(type) {
case *arrayContainer:
return bc.intersectsArray(x)
case *bitmapContainer:
return bc.intersectsBitmap(x)
case *runContainer16:
return x.intersects(bc)
}
panic("unsupported container type")
}
func (bc *bitmapContainer) iand(a container) container {
switch x := a.(type) {
case *arrayContainer:
return bc.iandArray(x)
case *bitmapContainer:
return bc.iandBitmap(x)
case *runContainer16:
if x.isFull() {
return bc.clone()
}
return bc.iandRun16(x)
}
panic("unsupported container type")
}
func (bc *bitmapContainer) iandRun16(rc *runContainer16) container {
rcb := newBitmapContainerFromRun(rc)
return bc.iandBitmap(rcb)
}
func (bc *bitmapContainer) iandArray(ac *arrayContainer) container {
acb := ac.toBitmapContainer()
return bc.iandBitmap(acb)
}
func (bc *bitmapContainer) andArray(value2 *arrayContainer) *arrayContainer {
answer := newArrayContainerCapacity(len(value2.content))
answer.content = answer.content[:cap(answer.content)]
c := value2.getCardinality()
pos := 0
for k := 0; k < c; k++ {
v := value2.content[k]
answer.content[pos] = v
pos += int(bc.bitValue(v))
}
answer.content = answer.content[:pos]
return answer
}
func (bc *bitmapContainer) andArrayCardinality(value2 *arrayContainer) int {
c := value2.getCardinality()
pos := 0
for k := 0; k < c; k++ {
v := value2.content[k]
pos += int(bc.bitValue(v))
}
return pos
}
func (bc *bitmapContainer) getCardinalityInRange(start, end uint) int {
if start >= end {
return 0
}
firstword := start / 64
endword := (end - 1) / 64
const allones = ^uint64(0)
if firstword == endword {
return int(popcount(bc.bitmap[firstword] & ((allones << (start % 64)) & (allones >> ((64 - end) & 63)))))
}
answer := popcount(bc.bitmap[firstword] & (allones << (start % 64)))
answer += popcntSlice(bc.bitmap[firstword+1 : endword])
answer += popcount(bc.bitmap[endword] & (allones >> ((64 - end) & 63)))
return int(answer)
}
func (bc *bitmapContainer) andBitmap(value2 *bitmapContainer) container {
newcardinality := int(popcntAndSlice(bc.bitmap, value2.bitmap))
if newcardinality > arrayDefaultMaxSize {
answer := newBitmapContainer()
for k := 0; k < len(answer.bitmap); k++ {
answer.bitmap[k] = bc.bitmap[k] & value2.bitmap[k]
}
answer.cardinality = newcardinality
return answer
}
ac := newArrayContainerSize(newcardinality)
fillArrayAND(ac.content, bc.bitmap, value2.bitmap)
ac.content = ac.content[:newcardinality] // defensive: keep the slice length in sync with the computed cardinality
return ac
}
func (bc *bitmapContainer) intersectsArray(value2 *arrayContainer) bool {
c := value2.getCardinality()
for k := 0; k < c; k++ {
v := value2.content[k]
if bc.contains(v) {
return true
}
}
return false
}
func (bc *bitmapContainer) intersectsBitmap(value2 *bitmapContainer) bool {
for k := 0; k < len(bc.bitmap); k++ {
if (bc.bitmap[k] & value2.bitmap[k]) != 0 {
return true
}
}
return false
}
func (bc *bitmapContainer) iandBitmap(value2 *bitmapContainer) container {
newcardinality := int(popcntAndSlice(bc.bitmap, value2.bitmap))
for k := 0; k < len(bc.bitmap); k++ {
bc.bitmap[k] = bc.bitmap[k] & value2.bitmap[k]
}
bc.cardinality = newcardinality
if newcardinality <= arrayDefaultMaxSize {
return newArrayContainerFromBitmap(bc)
}
return bc
}
func (bc *bitmapContainer) andNot(a container) container {
switch x := a.(type) {
case *arrayContainer:
return bc.andNotArray(x)
case *bitmapContainer:
return bc.andNotBitmap(x)
case *runContainer16:
return bc.andNotRun16(x)
}
panic("unsupported container type")
}
func (bc *bitmapContainer) andNotRun16(rc *runContainer16) container {
rcb := rc.toBitmapContainer()
return bc.andNotBitmap(rcb)
}
func (bc *bitmapContainer) iandNot(a container) container {
//p("bitmapContainer.iandNot() starting")
switch x := a.(type) {
case *arrayContainer:
return bc.iandNotArray(x)
case *bitmapContainer:
return bc.iandNotBitmapSurely(x)
case *runContainer16:
return bc.iandNotRun16(x)
}
panic("unsupported container type")
}
func (bc *bitmapContainer) iandNotArray(ac *arrayContainer) container {
acb := ac.toBitmapContainer()
return bc.iandNotBitmapSurely(acb)
}
func (bc *bitmapContainer) iandNotRun16(rc *runContainer16) container {
rcb := rc.toBitmapContainer()
return bc.iandNotBitmapSurely(rcb)
}
func (bc *bitmapContainer) andNotArray(value2 *arrayContainer) container {
answer := bc.clone().(*bitmapContainer)
c := value2.getCardinality()
for k := 0; k < c; k++ {
vc := value2.content[k]
i := uint(vc) >> 6
oldv := answer.bitmap[i]
newv := oldv &^ (uint64(1) << (vc % 64))
answer.bitmap[i] = newv
answer.cardinality -= int((oldv ^ newv) >> (vc % 64))
}
if answer.cardinality <= arrayDefaultMaxSize {
return answer.toArrayContainer()
}
return answer
}
func (bc *bitmapContainer) andNotBitmap(value2 *bitmapContainer) container {
newCardinality := int(popcntMaskSlice(bc.bitmap, value2.bitmap))
if newCardinality > arrayDefaultMaxSize {
answer := newBitmapContainer()
for k := 0; k < len(answer.bitmap); k++ {
answer.bitmap[k] = bc.bitmap[k] &^ value2.bitmap[k]
}
answer.cardinality = newCardinality
return answer
}
ac := newArrayContainerSize(newCardinality)
fillArrayANDNOT(ac.content, bc.bitmap, value2.bitmap)
return ac
}
func (bc *bitmapContainer) iandNotBitmapSurely(value2 *bitmapContainer) *bitmapContainer {
newCardinality := int(popcntMaskSlice(bc.bitmap, value2.bitmap))
for k := 0; k < len(bc.bitmap); k++ {
bc.bitmap[k] = bc.bitmap[k] &^ value2.bitmap[k]
}
bc.cardinality = newCardinality
return bc
}
func (bc *bitmapContainer) contains(i uint16) bool { //testbit
x := uint(i)
w := bc.bitmap[x>>6]
mask := uint64(1) << (x & 63)
return (w & mask) != 0
}
func (bc *bitmapContainer) bitValue(i uint16) uint64 {
x := uint(i)
w := bc.bitmap[x>>6]
return (w >> (x & 63)) & 1
}
func (bc *bitmapContainer) loadData(arrayContainer *arrayContainer) {
bc.cardinality = arrayContainer.getCardinality()
c := arrayContainer.getCardinality()
for k := 0; k < c; k++ {
x := arrayContainer.content[k]
i := int(x) / 64
bc.bitmap[i] |= (uint64(1) << uint(x%64))
}
}
func (bc *bitmapContainer) toArrayContainer() *arrayContainer {
ac := &arrayContainer{}
ac.loadData(bc)
return ac
}
func (bc *bitmapContainer) fillArray(container []uint16) {
//TODO: rewrite in assembly
pos := 0
base := 0
for k := 0; k < len(bc.bitmap); k++ {
bitset := bc.bitmap[k]
for bitset != 0 {
t := bitset & -bitset
container[pos] = uint16((base + int(popcount(t-1))))
pos = pos + 1
bitset ^= t
}
base += 64
}
}
func (bc *bitmapContainer) NextSetBit(i int) int {
x := i / 64
if x >= len(bc.bitmap) {
return -1
}
w := bc.bitmap[x]
w = w >> uint(i%64)
if w != 0 {
return i + countTrailingZeros(w)
}
x++
for ; x < len(bc.bitmap); x++ {
if bc.bitmap[x] != 0 {
return (x * 64) + countTrailingZeros(bc.bitmap[x])
}
}
return -1
}
// reference the java implementation
// https://github.com/RoaringBitmap/RoaringBitmap/blob/master/src/main/java/org/roaringbitmap/BitmapContainer.java#L875-L892
//
func (bc *bitmapContainer) numberOfRuns() int {
if bc.cardinality == 0 {
return 0
}
var numRuns uint64
nextWord := bc.bitmap[0]
for i := 0; i < len(bc.bitmap)-1; i++ {
word := nextWord
nextWord = bc.bitmap[i+1]
numRuns += popcount((^word)&(word<<1)) + ((word >> 63) &^ nextWord)
}
word := nextWord
numRuns += popcount((^word) & (word << 1))
if (word & 0x8000000000000000) != 0 {
numRuns++
}
return int(numRuns)
}
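// Illustrative sketch (not part of the vendored file): the word-level trick
// above counts a run wherever a set bit is followed by a clear bit (a run
// ending), with corrections for runs that end exactly at a 64-bit word
// boundary or at the very top bit. A quick in-package check using the range
// constructor defined earlier in this file:
func exampleBitmapNumberOfRuns() {
	bc := newBitmapContainerwithRange(5, 8) // bits 5..8 set: one run
	if bc.numberOfRuns() != 1 {
		panic("expected exactly one run")
	}
}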
// convert to run or array *if needed*
func (bc *bitmapContainer) toEfficientContainer() container {
numRuns := bc.numberOfRuns()
sizeAsRunContainer := runContainer16SerializedSizeInBytes(numRuns)
sizeAsBitmapContainer := bitmapContainerSizeInBytes()
card := bc.getCardinality()
sizeAsArrayContainer := arrayContainerSizeInBytes(card)
if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) {
return newRunContainer16FromBitmapContainer(bc)
}
if card <= arrayDefaultMaxSize {
return bc.toArrayContainer()
}
return bc
}
func newBitmapContainerFromRun(rc *runContainer16) *bitmapContainer {
if len(rc.iv) == 1 {
return newBitmapContainerwithRange(int(rc.iv[0].start), int(rc.iv[0].last()))
}
bc := newBitmapContainer()
for i := range rc.iv {
setBitmapRange(bc.bitmap, int(rc.iv[i].start), int(rc.iv[i].last())+1)
bc.cardinality += int(rc.iv[i].last()) + 1 - int(rc.iv[i].start)
}
//bc.computeCardinality()
return bc
}
func (bc *bitmapContainer) containerType() contype {
return bitmapContype
}


@ -0,0 +1,415 @@
package roaring
// NOTE: THIS FILE WAS PRODUCED BY THE
// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
// DO NOT EDIT
import "github.com/tinylib/msgp/msgp"
// DecodeMsg implements msgp.Decodable
func (z *bitmapContainer) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zbzg uint32
zbzg, err = dc.ReadMapHeader()
if err != nil {
return
}
for zbzg > 0 {
zbzg--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "cardinality":
z.cardinality, err = dc.ReadInt()
if err != nil {
return
}
case "bitmap":
var zbai uint32
zbai, err = dc.ReadArrayHeader()
if err != nil {
return
}
if cap(z.bitmap) >= int(zbai) {
z.bitmap = (z.bitmap)[:zbai]
} else {
z.bitmap = make([]uint64, zbai)
}
for zxvk := range z.bitmap {
z.bitmap[zxvk], err = dc.ReadUint64()
if err != nil {
return
}
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *bitmapContainer) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "cardinality"
err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
if err != nil {
return err
}
err = en.WriteInt(z.cardinality)
if err != nil {
return
}
// write "bitmap"
err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
if err != nil {
return err
}
err = en.WriteArrayHeader(uint32(len(z.bitmap)))
if err != nil {
return
}
for zxvk := range z.bitmap {
err = en.WriteUint64(z.bitmap[zxvk])
if err != nil {
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *bitmapContainer) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "cardinality"
o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
o = msgp.AppendInt(o, z.cardinality)
// string "bitmap"
o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
o = msgp.AppendArrayHeader(o, uint32(len(z.bitmap)))
for zxvk := range z.bitmap {
o = msgp.AppendUint64(o, z.bitmap[zxvk])
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *bitmapContainer) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zcmr uint32
zcmr, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for zcmr > 0 {
zcmr--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "cardinality":
z.cardinality, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
return
}
case "bitmap":
var zajw uint32
zajw, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return
}
if cap(z.bitmap) >= int(zajw) {
z.bitmap = (z.bitmap)[:zajw]
} else {
z.bitmap = make([]uint64, zajw)
}
for zxvk := range z.bitmap {
z.bitmap[zxvk], bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *bitmapContainer) Msgsize() (s int) {
s = 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.bitmap) * (msgp.Uint64Size))
return
}
// DecodeMsg implements msgp.Decodable
func (z *bitmapContainerShortIterator) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zhct uint32
zhct, err = dc.ReadMapHeader()
if err != nil {
return
}
for zhct > 0 {
zhct--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "ptr":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
return
}
z.ptr = nil
} else {
if z.ptr == nil {
z.ptr = new(bitmapContainer)
}
var zcua uint32
zcua, err = dc.ReadMapHeader()
if err != nil {
return
}
for zcua > 0 {
zcua--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "cardinality":
z.ptr.cardinality, err = dc.ReadInt()
if err != nil {
return
}
case "bitmap":
var zxhx uint32
zxhx, err = dc.ReadArrayHeader()
if err != nil {
return
}
if cap(z.ptr.bitmap) >= int(zxhx) {
z.ptr.bitmap = (z.ptr.bitmap)[:zxhx]
} else {
z.ptr.bitmap = make([]uint64, zxhx)
}
for zwht := range z.ptr.bitmap {
z.ptr.bitmap[zwht], err = dc.ReadUint64()
if err != nil {
return
}
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
}
case "i":
z.i, err = dc.ReadInt()
if err != nil {
return
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *bitmapContainerShortIterator) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "ptr"
err = en.Append(0x82, 0xa3, 0x70, 0x74, 0x72)
if err != nil {
return err
}
if z.ptr == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
// map header, size 2
// write "cardinality"
err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
if err != nil {
return err
}
err = en.WriteInt(z.ptr.cardinality)
if err != nil {
return
}
// write "bitmap"
err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
if err != nil {
return err
}
err = en.WriteArrayHeader(uint32(len(z.ptr.bitmap)))
if err != nil {
return
}
for zwht := range z.ptr.bitmap {
err = en.WriteUint64(z.ptr.bitmap[zwht])
if err != nil {
return
}
}
}
// write "i"
err = en.Append(0xa1, 0x69)
if err != nil {
return err
}
err = en.WriteInt(z.i)
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *bitmapContainerShortIterator) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "ptr"
o = append(o, 0x82, 0xa3, 0x70, 0x74, 0x72)
if z.ptr == nil {
o = msgp.AppendNil(o)
} else {
// map header, size 2
// string "cardinality"
o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
o = msgp.AppendInt(o, z.ptr.cardinality)
// string "bitmap"
o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
o = msgp.AppendArrayHeader(o, uint32(len(z.ptr.bitmap)))
for zwht := range z.ptr.bitmap {
o = msgp.AppendUint64(o, z.ptr.bitmap[zwht])
}
}
// string "i"
o = append(o, 0xa1, 0x69)
o = msgp.AppendInt(o, z.i)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *bitmapContainerShortIterator) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zlqf uint32
zlqf, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for zlqf > 0 {
zlqf--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "ptr":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.ptr = nil
} else {
if z.ptr == nil {
z.ptr = new(bitmapContainer)
}
var zdaf uint32
zdaf, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for zdaf > 0 {
zdaf--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "cardinality":
z.ptr.cardinality, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
return
}
case "bitmap":
var zpks uint32
zpks, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return
}
if cap(z.ptr.bitmap) >= int(zpks) {
z.ptr.bitmap = (z.ptr.bitmap)[:zpks]
} else {
z.ptr.bitmap = make([]uint64, zpks)
}
for zwht := range z.ptr.bitmap {
z.ptr.bitmap[zwht], bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
}
case "i":
z.i, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *bitmapContainerShortIterator) Msgsize() (s int) {
s = 1 + 4
if z.ptr == nil {
s += msgp.NilSize
} else {
s += 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.ptr.bitmap) * (msgp.Uint64Size))
}
s += 2 + msgp.IntSize
return
}

vendor/github.com/RoaringBitmap/roaring/ctz.go generated vendored Normal file

@ -0,0 +1,11 @@
// +build go1.9
// "go1.9", from Go version 1.9 onward
// See https://golang.org/pkg/go/build/#hdr-Build_Constraints
package roaring
import "math/bits"
func countTrailingZeros(x uint64) int {
return bits.TrailingZeros64(x)
}

vendor/github.com/RoaringBitmap/roaring/ctz_compat.go generated vendored Normal file

@ -0,0 +1,71 @@
// +build !go1.9
package roaring
// Reuse of portions of go/src/math/big standard lib code
// under this license:
/*
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
const deBruijn32 = 0x077CB531
var deBruijn32Lookup = []byte{
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
}
const deBruijn64 = 0x03f79d71b4ca8b09
var deBruijn64Lookup = []byte{
0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}
// trailingZeroBits returns the number of consecutive least significant zero
// bits of x.
func countTrailingZeros(x uint64) int {
// x & -x leaves only the right-most bit set in the word. Let k be the
// index of that bit. Since only a single bit is set, the value is two
// to the power of k. Multiplying by a power of two is equivalent to
// left shifting, in this case by k bits. The de Bruijn constant is
// such that all six bit, consecutive substrings are distinct.
// Therefore, if we have a left shifted version of this constant we can
// find by how many bits it was shifted by looking at which six bit
// substring ended up at the top of the word.
// (Knuth, volume 4, section 7.3.1)
if x == 0 {
// We have to special case 0; the formula
// below doesn't work for 0.
return 64
}
return int(deBruijn64Lookup[((x&-x)*(deBruijn64))>>58])
}
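// Illustrative sketch (not part of the vendored file): a small in-package
// sanity check of the de Bruijn multiplication trick described above. The
// lowest set bit of 0x28 (binary 101000) is bit 3, and zero is special-cased
// to 64.
func exampleCountTrailingZeros() {
	if countTrailingZeros(0x28) != 3 {
		panic("expected 3 trailing zeros for 0x28")
	}
	if countTrailingZeros(0) != 64 {
		panic("expected 64 for the zero input")
	}
}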


@ -0,0 +1,215 @@
package roaring
import (
"container/heap"
)
// Or function that requires repairAfterLazy
func lazyOR(x1, x2 *Bitmap) *Bitmap {
answer := NewBitmap()
pos1 := 0
pos2 := 0
length1 := x1.highlowcontainer.size()
length2 := x2.highlowcontainer.size()
main:
for (pos1 < length1) && (pos2 < length2) {
s1 := x1.highlowcontainer.getKeyAtIndex(pos1)
s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
for {
if s1 < s2 {
answer.highlowcontainer.appendCopy(x1.highlowcontainer, pos1)
pos1++
if pos1 == length1 {
break main
}
s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
} else if s1 > s2 {
answer.highlowcontainer.appendCopy(x2.highlowcontainer, pos2)
pos2++
if pos2 == length2 {
break main
}
s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
} else {
c1 := x1.highlowcontainer.getContainerAtIndex(pos1)
switch t := c1.(type) {
case *arrayContainer:
c1 = t.toBitmapContainer()
case *runContainer16:
if !t.isFull() {
c1 = t.toBitmapContainer()
}
}
answer.highlowcontainer.appendContainer(s1, c1.lazyOR(x2.highlowcontainer.getContainerAtIndex(pos2)), false)
pos1++
pos2++
if (pos1 == length1) || (pos2 == length2) {
break main
}
s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
}
}
}
if pos1 == length1 {
answer.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2)
} else if pos2 == length2 {
answer.highlowcontainer.appendCopyMany(x1.highlowcontainer, pos1, length1)
}
return answer
}
// In-place Or function that requires repairAfterLazy
func (x1 *Bitmap) lazyOR(x2 *Bitmap) *Bitmap {
pos1 := 0
pos2 := 0
length1 := x1.highlowcontainer.size()
length2 := x2.highlowcontainer.size()
main:
for (pos1 < length1) && (pos2 < length2) {
s1 := x1.highlowcontainer.getKeyAtIndex(pos1)
s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
for {
if s1 < s2 {
pos1++
if pos1 == length1 {
break main
}
s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
} else if s1 > s2 {
x1.highlowcontainer.insertNewKeyValueAt(pos1, s2, x2.highlowcontainer.getContainerAtIndex(pos2).clone())
pos2++
pos1++
length1++
if pos2 == length2 {
break main
}
s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
} else {
c1 := x1.highlowcontainer.getContainerAtIndex(pos1)
switch t := c1.(type) {
case *arrayContainer:
c1 = t.toBitmapContainer()
case *runContainer16:
if !t.isFull() {
c1 = t.toBitmapContainer()
}
case *bitmapContainer:
c1 = x1.highlowcontainer.getWritableContainerAtIndex(pos1)
}
x1.highlowcontainer.containers[pos1] = c1.lazyIOR(x2.highlowcontainer.getContainerAtIndex(pos2))
x1.highlowcontainer.needCopyOnWrite[pos1] = false
pos1++
pos2++
if (pos1 == length1) || (pos2 == length2) {
break main
}
s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
}
}
}
if pos1 == length1 {
x1.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2)
}
return x1
}
// to be called after lazy aggregates
func (x1 *Bitmap) repairAfterLazy() {
for pos := 0; pos < x1.highlowcontainer.size(); pos++ {
c := x1.highlowcontainer.getContainerAtIndex(pos)
switch c.(type) {
case *bitmapContainer:
if c.(*bitmapContainer).cardinality == invalidCardinality {
c = x1.highlowcontainer.getWritableContainerAtIndex(pos)
c.(*bitmapContainer).computeCardinality()
if c.(*bitmapContainer).getCardinality() <= arrayDefaultMaxSize {
x1.highlowcontainer.setContainerAtIndex(pos, c.(*bitmapContainer).toArrayContainer())
} else if c.(*bitmapContainer).isFull() {
x1.highlowcontainer.setContainerAtIndex(pos, newRunContainer16Range(0, MaxUint16))
}
}
}
}
}
// FastAnd computes the intersection between many bitmaps quickly
// Compared to the And function, it can take many bitmaps as input, thus saving the trouble
// of manually calling "And" many times.
func FastAnd(bitmaps ...*Bitmap) *Bitmap {
if len(bitmaps) == 0 {
return NewBitmap()
} else if len(bitmaps) == 1 {
return bitmaps[0].Clone()
}
answer := And(bitmaps[0], bitmaps[1])
for _, bm := range bitmaps[2:] {
answer.And(bm)
}
return answer
}
// FastOr computes the union of many bitmaps in a single call, as opposed to
// calling Or repeatedly; it might also be faster than doing so.
func FastOr(bitmaps ...*Bitmap) *Bitmap {
if len(bitmaps) == 0 {
return NewBitmap()
} else if len(bitmaps) == 1 {
return bitmaps[0].Clone()
}
answer := lazyOR(bitmaps[0], bitmaps[1])
for _, bm := range bitmaps[2:] {
answer = answer.lazyOR(bm)
}
// here is where repairAfterLazy is called.
answer.repairAfterLazy()
return answer
}
// HeapOr computes the union between many bitmaps quickly using a heap.
// It might be faster than calling Or repeatedly.
func HeapOr(bitmaps ...*Bitmap) *Bitmap {
if len(bitmaps) == 0 {
return NewBitmap()
}
// TODO: for better speed, we could do the operation lazily, see Java implementation
pq := make(priorityQueue, len(bitmaps))
for i, bm := range bitmaps {
pq[i] = &item{bm, i}
}
heap.Init(&pq)
for pq.Len() > 1 {
x1 := heap.Pop(&pq).(*item)
x2 := heap.Pop(&pq).(*item)
heap.Push(&pq, &item{Or(x1.value, x2.value), 0})
}
return heap.Pop(&pq).(*item).value
}
// HeapXor computes the symmetric difference between many bitmaps quickly (as opposed to calling Xor repeatedly).
// Internally, this function uses a heap.
// It might be faster than calling Xor repeatedly.
func HeapXor(bitmaps ...*Bitmap) *Bitmap {
if len(bitmaps) == 0 {
return NewBitmap()
}
pq := make(priorityQueue, len(bitmaps))
for i, bm := range bitmaps {
pq[i] = &item{bm, i}
}
heap.Init(&pq)
for pq.Len() > 1 {
x1 := heap.Pop(&pq).(*item)
x2 := heap.Pop(&pq).(*item)
heap.Push(&pq, &item{Xor(x1.value, x2.value), 0})
}
return heap.Pop(&pq).(*item).value
}
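// Illustrative usage sketch (not part of the vendored file): the aggregation
// helpers above replace chains of pairwise And/Or calls. This assumes the
// exported roaring API (BitmapOf, GetCardinality) available elsewhere in this
// package:
func exampleFastAggregation() {
	a := BitmapOf(1, 2, 3)
	b := BitmapOf(3, 4)
	c := BitmapOf(3, 5, 100000)
	union := FastOr(a, b, c)  // {1, 2, 3, 4, 5, 100000}
	inter := FastAnd(a, b, c) // {3}
	if union.GetCardinality() != 6 || inter.GetCardinality() != 1 {
		panic("unexpected aggregation result")
	}
}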


@ -0,0 +1,23 @@
package roaring
type manyIterable interface {
nextMany(hs uint32, buf []uint32) int
}
type manyIterator struct {
slice []uint16
loc int
}
func (si *manyIterator) nextMany(hs uint32, buf []uint32) int {
n := 0
l := si.loc
s := si.slice
for n < len(buf) && l < len(s) {
buf[n] = uint32(s[l]) | hs
l++
n++
}
si.loc = l
return n
}

vendor/github.com/RoaringBitmap/roaring/parallel.go generated vendored Normal file

@ -0,0 +1,613 @@
package roaring
import (
"container/heap"
"fmt"
"runtime"
"sync"
)
var defaultWorkerCount = runtime.NumCPU()
type bitmapContainerKey struct {
key uint16
idx int
bitmap *Bitmap
}
type multipleContainers struct {
key uint16
containers []container
idx int
}
type keyedContainer struct {
key uint16
container container
idx int
}
type bitmapContainerHeap []bitmapContainerKey
func (h bitmapContainerHeap) Len() int { return len(h) }
func (h bitmapContainerHeap) Less(i, j int) bool { return h[i].key < h[j].key }
func (h bitmapContainerHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *bitmapContainerHeap) Push(x interface{}) {
// Push and Pop use pointer receivers because they modify the slice's length,
// not just its contents.
*h = append(*h, x.(bitmapContainerKey))
}
func (h *bitmapContainerHeap) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
func (h bitmapContainerHeap) Peek() bitmapContainerKey {
return h[0]
}
func (h *bitmapContainerHeap) popIncrementing() (key uint16, container container) {
k := h.Peek()
key = k.key
container = k.bitmap.highlowcontainer.containers[k.idx]
newIdx := k.idx + 1
if newIdx < k.bitmap.highlowcontainer.size() {
k = bitmapContainerKey{
k.bitmap.highlowcontainer.keys[newIdx],
newIdx,
k.bitmap,
}
(*h)[0] = k
heap.Fix(h, 0)
} else {
heap.Pop(h)
}
return
}
func (h *bitmapContainerHeap) Next(containers []container) multipleContainers {
if h.Len() == 0 {
return multipleContainers{}
}
key, container := h.popIncrementing()
containers = append(containers, container)
for h.Len() > 0 && key == h.Peek().key {
_, container = h.popIncrementing()
containers = append(containers, container)
}
return multipleContainers{
key,
containers,
-1,
}
}
func newBitmapContainerHeap(bitmaps ...*Bitmap) bitmapContainerHeap {
// Initialize heap
var h bitmapContainerHeap = make([]bitmapContainerKey, 0, len(bitmaps))
for _, bitmap := range bitmaps {
if !bitmap.IsEmpty() {
key := bitmapContainerKey{
bitmap.highlowcontainer.keys[0],
0,
bitmap,
}
h = append(h, key)
}
}
heap.Init(&h)
return h
}
func repairAfterLazy(c container) container {
switch t := c.(type) {
case *bitmapContainer:
if t.cardinality == invalidCardinality {
t.computeCardinality()
}
if t.getCardinality() <= arrayDefaultMaxSize {
return t.toArrayContainer()
} else if c.(*bitmapContainer).isFull() {
return newRunContainer16Range(0, MaxUint16)
}
}
return c
}
func toBitmapContainer(c container) container {
switch t := c.(type) {
case *arrayContainer:
return t.toBitmapContainer()
case *runContainer16:
if !t.isFull() {
return t.toBitmapContainer()
}
}
return c
}
func appenderRoutine(bitmapChan chan<- *Bitmap, resultChan <-chan keyedContainer, expectedKeysChan <-chan int) {
expectedKeys := -1
appendedKeys := 0
keys := make([]uint16, 0)
containers := make([]container, 0)
for appendedKeys != expectedKeys {
select {
case item := <-resultChan:
if len(keys) <= item.idx {
keys = append(keys, make([]uint16, item.idx-len(keys)+1)...)
containers = append(containers, make([]container, item.idx-len(containers)+1)...)
}
keys[item.idx] = item.key
containers[item.idx] = item.container
appendedKeys++
case msg := <-expectedKeysChan:
expectedKeys = msg
}
}
answer := &Bitmap{
roaringArray{
make([]uint16, 0, expectedKeys),
make([]container, 0, expectedKeys),
make([]bool, 0, expectedKeys),
false,
nil,
},
}
for i := range keys {
if containers[i] != nil { // in case a resulting container was empty, see ParAnd function
answer.highlowcontainer.appendContainer(keys[i], containers[i], false)
}
}
bitmapChan <- answer
}
// ParHeapOr computes the union (OR) of all provided bitmaps in parallel,
// where the parameter "parallelism" determines how many workers are to be used
// (if it is set to 0, a default number of workers is chosen)
// ParHeapOr uses a heap to compute the union. In rare cases it might be faster than ParOr
func ParHeapOr(parallelism int, bitmaps ...*Bitmap) *Bitmap {
bitmapCount := len(bitmaps)
if bitmapCount == 0 {
return NewBitmap()
} else if bitmapCount == 1 {
return bitmaps[0].Clone()
}
if parallelism == 0 {
parallelism = defaultWorkerCount
}
h := newBitmapContainerHeap(bitmaps...)
bitmapChan := make(chan *Bitmap)
inputChan := make(chan multipleContainers, 128)
resultChan := make(chan keyedContainer, 32)
expectedKeysChan := make(chan int)
pool := sync.Pool{
New: func() interface{} {
return make([]container, 0, len(bitmaps))
},
}
orFunc := func() {
// Assumes only structs with >=2 containers are passed
for input := range inputChan {
c := toBitmapContainer(input.containers[0]).lazyOR(input.containers[1])
for _, next := range input.containers[2:] {
c = c.lazyIOR(next)
}
c = repairAfterLazy(c)
kx := keyedContainer{
input.key,
c,
input.idx,
}
resultChan <- kx
pool.Put(input.containers[:0])
}
}
go appenderRoutine(bitmapChan, resultChan, expectedKeysChan)
for i := 0; i < parallelism; i++ {
go orFunc()
}
idx := 0
for h.Len() > 0 {
ck := h.Next(pool.Get().([]container))
if len(ck.containers) == 1 {
resultChan <- keyedContainer{
ck.key,
ck.containers[0],
idx,
}
pool.Put(ck.containers[:0])
} else {
ck.idx = idx
inputChan <- ck
}
idx++
}
expectedKeysChan <- idx
bitmap := <-bitmapChan
close(inputChan)
close(resultChan)
close(expectedKeysChan)
return bitmap
}
// ParAnd computes the intersection (AND) of all provided bitmaps in parallel,
// where the parameter "parallelism" determines how many workers are to be used
// (if it is set to 0, a default number of workers is chosen)
func ParAnd(parallelism int, bitmaps ...*Bitmap) *Bitmap {
bitmapCount := len(bitmaps)
if bitmapCount == 0 {
return NewBitmap()
} else if bitmapCount == 1 {
return bitmaps[0].Clone()
}
if parallelism == 0 {
parallelism = defaultWorkerCount
}
h := newBitmapContainerHeap(bitmaps...)
bitmapChan := make(chan *Bitmap)
inputChan := make(chan multipleContainers, 128)
resultChan := make(chan keyedContainer, 32)
expectedKeysChan := make(chan int)
andFunc := func() {
// Assumes only structs with >=2 containers are passed
for input := range inputChan {
c := input.containers[0].and(input.containers[1])
for _, next := range input.containers[2:] {
if c.getCardinality() == 0 {
break
}
c = c.iand(next)
}
// Send a nil explicitly if the result of the intersection is an empty container
if c.getCardinality() == 0 {
c = nil
}
kx := keyedContainer{
input.key,
c,
input.idx,
}
resultChan <- kx
}
}
go appenderRoutine(bitmapChan, resultChan, expectedKeysChan)
for i := 0; i < parallelism; i++ {
go andFunc()
}
idx := 0
for h.Len() > 0 {
ck := h.Next(make([]container, 0, 4))
if len(ck.containers) == bitmapCount {
ck.idx = idx
inputChan <- ck
idx++
}
}
expectedKeysChan <- idx
bitmap := <-bitmapChan
close(inputChan)
close(resultChan)
close(expectedKeysChan)
return bitmap
}
// ParOr computes the union (OR) of all provided bitmaps in parallel,
// where the parameter "parallelism" determines how many workers are to be used
// (if it is set to 0, a default number of workers is chosen)
func ParOr(parallelism int, bitmaps ...*Bitmap) *Bitmap {
var lKey uint16 = MaxUint16
var hKey uint16 = 0
bitmapsFiltered := bitmaps[:0]
for _, b := range bitmaps {
if !b.IsEmpty() {
bitmapsFiltered = append(bitmapsFiltered, b)
}
}
bitmaps = bitmapsFiltered
for _, b := range bitmaps {
lKey = minOfUint16(lKey, b.highlowcontainer.keys[0])
hKey = maxOfUint16(hKey, b.highlowcontainer.keys[b.highlowcontainer.size()-1])
}
if lKey == MaxUint16 && hKey == 0 {
return New()
} else if len(bitmaps) == 1 {
return bitmaps[0]
}
keyRange := hKey - lKey + 1
if keyRange == 1 {
// revert to FastOr: with only a single key in the range,
// no container-level aggregation parallelism is achievable
return FastOr(bitmaps...)
}
if parallelism == 0 {
parallelism = defaultWorkerCount
}
var chunkSize int
var chunkCount int
if parallelism*4 > int(keyRange) {
chunkSize = 1
chunkCount = int(keyRange)
} else {
chunkCount = parallelism * 4
chunkSize = (int(keyRange) + chunkCount - 1) / chunkCount
}
if chunkCount*chunkSize < int(keyRange) {
// it's fine to panic to indicate an implementation error
panic(fmt.Sprintf("invariant check failed: chunkCount * chunkSize < keyRange, %d * %d < %d", chunkCount, chunkSize, keyRange))
}
chunks := make([]*roaringArray, chunkCount)
chunkSpecChan := make(chan parChunkSpec, minOfInt(maxOfInt(64, 2*parallelism), int(chunkCount)))
chunkChan := make(chan parChunk, minOfInt(32, int(chunkCount)))
orFunc := func() {
for spec := range chunkSpecChan {
ra := lazyOrOnRange(&bitmaps[0].highlowcontainer, &bitmaps[1].highlowcontainer, spec.start, spec.end)
for _, b := range bitmaps[2:] {
ra = lazyIOrOnRange(ra, &b.highlowcontainer, spec.start, spec.end)
}
for i, c := range ra.containers {
ra.containers[i] = repairAfterLazy(c)
}
chunkChan <- parChunk{ra, spec.idx}
}
}
for i := 0; i < parallelism; i++ {
go orFunc()
}
go func() {
for i := 0; i < chunkCount; i++ {
spec := parChunkSpec{
start: uint16(int(lKey) + i*chunkSize),
end: uint16(minOfInt(int(lKey)+(i+1)*chunkSize-1, int(hKey))),
idx: int(i),
}
chunkSpecChan <- spec
}
}()
chunksRemaining := chunkCount
for chunk := range chunkChan {
chunks[chunk.idx] = chunk.ra
chunksRemaining--
if chunksRemaining == 0 {
break
}
}
close(chunkChan)
close(chunkSpecChan)
containerCount := 0
for _, chunk := range chunks {
containerCount += chunk.size()
}
result := Bitmap{
roaringArray{
containers: make([]container, containerCount),
keys: make([]uint16, containerCount),
needCopyOnWrite: make([]bool, containerCount),
},
}
resultOffset := 0
for _, chunk := range chunks {
copy(result.highlowcontainer.containers[resultOffset:], chunk.containers)
copy(result.highlowcontainer.keys[resultOffset:], chunk.keys)
copy(result.highlowcontainer.needCopyOnWrite[resultOffset:], chunk.needCopyOnWrite)
resultOffset += chunk.size()
}
return &result
}
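// Illustrative usage sketch (not part of the vendored file): the parallel
// aggregates above take a worker count as their first argument, and passing 0
// falls back to a default based on runtime.NumCPU. This assumes the exported
// BitmapOf constructor from this package:
func exampleParallelAggregation() {
	bitmaps := []*Bitmap{BitmapOf(1, 2), BitmapOf(2, 3), BitmapOf(2, 4)}
	union := ParOr(0, bitmaps...)  // default worker count
	inter := ParAnd(2, bitmaps...) // exactly two workers
	if union.GetCardinality() != 4 || inter.GetCardinality() != 1 {
		panic("unexpected parallel aggregation result")
	}
}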
type parChunkSpec struct {
start uint16
end uint16
idx int
}
type parChunk struct {
ra *roaringArray
idx int
}
func (c parChunk) size() int {
return c.ra.size()
}
func parNaiveStartAt(ra *roaringArray, start uint16, last uint16) int {
for idx, key := range ra.keys {
if key >= start && key <= last {
return idx
} else if key > last {
break
}
}
return ra.size()
}
func lazyOrOnRange(ra1, ra2 *roaringArray, start, last uint16) *roaringArray {
answer := newRoaringArray()
length1 := ra1.size()
length2 := ra2.size()
idx1 := parNaiveStartAt(ra1, start, last)
idx2 := parNaiveStartAt(ra2, start, last)
var key1 uint16
var key2 uint16
if idx1 < length1 && idx2 < length2 {
key1 = ra1.getKeyAtIndex(idx1)
key2 = ra2.getKeyAtIndex(idx2)
for key1 <= last && key2 <= last {
if key1 < key2 {
answer.appendCopy(*ra1, idx1)
idx1++
if idx1 == length1 {
break
}
key1 = ra1.getKeyAtIndex(idx1)
} else if key1 > key2 {
answer.appendCopy(*ra2, idx2)
idx2++
if idx2 == length2 {
break
}
key2 = ra2.getKeyAtIndex(idx2)
} else {
c1 := ra1.getFastContainerAtIndex(idx1, false)
answer.appendContainer(key1, c1.lazyOR(ra2.getContainerAtIndex(idx2)), false)
idx1++
idx2++
if idx1 == length1 || idx2 == length2 {
break
}
key1 = ra1.getKeyAtIndex(idx1)
key2 = ra2.getKeyAtIndex(idx2)
}
}
}
if idx2 < length2 {
key2 = ra2.getKeyAtIndex(idx2)
for key2 <= last {
answer.appendCopy(*ra2, idx2)
idx2++
if idx2 == length2 {
break
}
key2 = ra2.getKeyAtIndex(idx2)
}
}
if idx1 < length1 {
key1 = ra1.getKeyAtIndex(idx1)
for key1 <= last {
answer.appendCopy(*ra1, idx1)
idx1++
if idx1 == length1 {
break
}
key1 = ra1.getKeyAtIndex(idx1)
}
}
return answer
}
func lazyIOrOnRange(ra1, ra2 *roaringArray, start, last uint16) *roaringArray {
length1 := ra1.size()
length2 := ra2.size()
idx1 := 0
idx2 := parNaiveStartAt(ra2, start, last)
var key1 uint16
var key2 uint16
if idx1 < length1 && idx2 < length2 {
key1 = ra1.getKeyAtIndex(idx1)
key2 = ra2.getKeyAtIndex(idx2)
for key1 <= last && key2 <= last {
if key1 < key2 {
idx1++
if idx1 >= length1 {
break
}
key1 = ra1.getKeyAtIndex(idx1)
} else if key1 > key2 {
ra1.insertNewKeyValueAt(idx1, key2, ra2.getContainerAtIndex(idx2))
ra1.needCopyOnWrite[idx1] = true
idx2++
idx1++
length1++
if idx2 >= length2 {
break
}
key2 = ra2.getKeyAtIndex(idx2)
} else {
c1 := ra1.getFastContainerAtIndex(idx1, true)
ra1.containers[idx1] = c1.lazyIOR(ra2.getContainerAtIndex(idx2))
ra1.needCopyOnWrite[idx1] = false
idx1++
idx2++
if idx1 >= length1 || idx2 >= length2 {
break
}
key1 = ra1.getKeyAtIndex(idx1)
key2 = ra2.getKeyAtIndex(idx2)
}
}
}
if idx2 < length2 {
key2 = ra2.getKeyAtIndex(idx2)
for key2 <= last {
ra1.appendCopy(*ra2, idx2)
idx2++
if idx2 >= length2 {
break
}
key2 = ra2.getKeyAtIndex(idx2)
}
}
return ra1
}

vendor/github.com/RoaringBitmap/roaring/popcnt.go generated vendored Normal file

@ -0,0 +1,11 @@
// +build go1.9
// "go1.9", from Go version 1.9 onward
// See https://golang.org/pkg/go/build/#hdr-Build_Constraints
package roaring
import "math/bits"
func popcount(x uint64) uint64 {
return uint64(bits.OnesCount64(x))
}

vendor/github.com/RoaringBitmap/roaring/popcnt_amd64.s generated vendored Normal file

@ -0,0 +1,103 @@
// +build amd64,!appengine,!go1.9
TEXT ·hasAsm(SB),4,$0-1
MOVQ $1, AX
CPUID
SHRQ $23, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET
#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2
TEXT ·popcntSliceAsm(SB),4,$0-32
XORQ AX, AX
MOVQ s+0(FP), SI
MOVQ s_len+8(FP), CX
TESTQ CX, CX
JZ popcntSliceEnd
popcntSliceLoop:
BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX
ADDQ DX, AX
ADDQ $8, SI
LOOP popcntSliceLoop
popcntSliceEnd:
MOVQ AX, ret+24(FP)
RET
TEXT ·popcntMaskSliceAsm(SB),4,$0-56
XORQ AX, AX
MOVQ s+0(FP), SI
MOVQ s_len+8(FP), CX
TESTQ CX, CX
JZ popcntMaskSliceEnd
MOVQ m+24(FP), DI
popcntMaskSliceLoop:
MOVQ (DI), DX
NOTQ DX
ANDQ (SI), DX
POPCNTQ_DX_DX
ADDQ DX, AX
ADDQ $8, SI
ADDQ $8, DI
LOOP popcntMaskSliceLoop
popcntMaskSliceEnd:
MOVQ AX, ret+48(FP)
RET
TEXT ·popcntAndSliceAsm(SB),4,$0-56
XORQ AX, AX
MOVQ s+0(FP), SI
MOVQ s_len+8(FP), CX
TESTQ CX, CX
JZ popcntAndSliceEnd
MOVQ m+24(FP), DI
popcntAndSliceLoop:
MOVQ (DI), DX
ANDQ (SI), DX
POPCNTQ_DX_DX
ADDQ DX, AX
ADDQ $8, SI
ADDQ $8, DI
LOOP popcntAndSliceLoop
popcntAndSliceEnd:
MOVQ AX, ret+48(FP)
RET
TEXT ·popcntOrSliceAsm(SB),4,$0-56
XORQ AX, AX
MOVQ s+0(FP), SI
MOVQ s_len+8(FP), CX
TESTQ CX, CX
JZ popcntOrSliceEnd
MOVQ m+24(FP), DI
popcntOrSliceLoop:
MOVQ (DI), DX
ORQ (SI), DX
POPCNTQ_DX_DX
ADDQ DX, AX
ADDQ $8, SI
ADDQ $8, DI
LOOP popcntOrSliceLoop
popcntOrSliceEnd:
MOVQ AX, ret+48(FP)
RET
TEXT ·popcntXorSliceAsm(SB),4,$0-56
XORQ AX, AX
MOVQ s+0(FP), SI
MOVQ s_len+8(FP), CX
TESTQ CX, CX
JZ popcntXorSliceEnd
MOVQ m+24(FP), DI
popcntXorSliceLoop:
MOVQ (DI), DX
XORQ (SI), DX
POPCNTQ_DX_DX
ADDQ DX, AX
ADDQ $8, SI
ADDQ $8, DI
LOOP popcntXorSliceLoop
popcntXorSliceEnd:
MOVQ AX, ret+48(FP)
RET

67
vendor/github.com/RoaringBitmap/roaring/popcnt_asm.go generated vendored Normal file
View File

@ -0,0 +1,67 @@
// +build amd64,!appengine,!go1.9
package roaring
// *** the following functions are defined in popcnt_amd64.s
//go:noescape
func hasAsm() bool
// useAsm is a flag used to select the GO or ASM implementation of the popcnt function
var useAsm = hasAsm()
//go:noescape
func popcntSliceAsm(s []uint64) uint64
//go:noescape
func popcntMaskSliceAsm(s, m []uint64) uint64
//go:noescape
func popcntAndSliceAsm(s, m []uint64) uint64
//go:noescape
func popcntOrSliceAsm(s, m []uint64) uint64
//go:noescape
func popcntXorSliceAsm(s, m []uint64) uint64
func popcntSlice(s []uint64) uint64 {
if useAsm {
return popcntSliceAsm(s)
}
return popcntSliceGo(s)
}
func popcntMaskSlice(s, m []uint64) uint64 {
if useAsm {
return popcntMaskSliceAsm(s, m)
}
return popcntMaskSliceGo(s, m)
}
func popcntAndSlice(s, m []uint64) uint64 {
if useAsm {
return popcntAndSliceAsm(s, m)
}
return popcntAndSliceGo(s, m)
}
func popcntOrSlice(s, m []uint64) uint64 {
if useAsm {
return popcntOrSliceAsm(s, m)
}
return popcntOrSliceGo(s, m)
}
func popcntXorSlice(s, m []uint64) uint64 {
if useAsm {
return popcntXorSliceAsm(s, m)
}
return popcntXorSliceGo(s, m)
}

View File

@ -0,0 +1,17 @@
// +build !go1.9
package roaring
// bit population count, take from
// https://code.google.com/p/go/issues/detail?id=4988#c11
// credit: https://code.google.com/u/arnehormann/
// credit: https://play.golang.org/p/U7SogJ7psJ
// credit: http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
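// e.g. popcount(0x00ff) == 8: after the masking steps each byte holds its own
// bit count, and the multiply by 0x0101010101010101 sums those bytes into the
// most significant byte, which the final shift extracts.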
func popcount(x uint64) uint64 {
x -= (x >> 1) & 0x5555555555555555
x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
x += x >> 4
x &= 0x0f0f0f0f0f0f0f0f
x *= 0x0101010101010101
return x >> 56
}

View File

@ -0,0 +1,23 @@
// +build !amd64 appengine go1.9
package roaring
func popcntSlice(s []uint64) uint64 {
return popcntSliceGo(s)
}
func popcntMaskSlice(s, m []uint64) uint64 {
return popcntMaskSliceGo(s, m)
}
func popcntAndSlice(s, m []uint64) uint64 {
return popcntAndSliceGo(s, m)
}
func popcntOrSlice(s, m []uint64) uint64 {
return popcntOrSliceGo(s, m)
}
func popcntXorSlice(s, m []uint64) uint64 {
return popcntXorSliceGo(s, m)
}

View File

@ -0,0 +1,41 @@
package roaring
func popcntSliceGo(s []uint64) uint64 {
cnt := uint64(0)
for _, x := range s {
cnt += popcount(x)
}
return cnt
}
func popcntMaskSliceGo(s, m []uint64) uint64 {
cnt := uint64(0)
for i := range s {
cnt += popcount(s[i] &^ m[i])
}
return cnt
}
func popcntAndSliceGo(s, m []uint64) uint64 {
cnt := uint64(0)
for i := range s {
cnt += popcount(s[i] & m[i])
}
return cnt
}
func popcntOrSliceGo(s, m []uint64) uint64 {
cnt := uint64(0)
for i := range s {
cnt += popcount(s[i] | m[i])
}
return cnt
}
func popcntXorSliceGo(s, m []uint64) uint64 {
cnt := uint64(0)
for i := range s {
cnt += popcount(s[i] ^ m[i])
}
return cnt
}

View File

@ -0,0 +1,101 @@
package roaring
import "container/heap"
/////////////
// The priorityQueue is used to keep Bitmaps sorted.
////////////
type item struct {
value *Bitmap
index int
}
type priorityQueue []*item
func (pq priorityQueue) Len() int { return len(pq) }
func (pq priorityQueue) Less(i, j int) bool {
return pq[i].value.GetSizeInBytes() < pq[j].value.GetSizeInBytes()
}
func (pq priorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
}
func (pq *priorityQueue) Push(x interface{}) {
n := len(*pq)
item := x.(*item)
item.index = n
*pq = append(*pq, item)
}
func (pq *priorityQueue) Pop() interface{} {
old := *pq
n := len(old)
item := old[n-1]
item.index = -1 // for safety
*pq = old[0 : n-1]
return item
}
func (pq *priorityQueue) update(item *item, value *Bitmap) {
item.value = value
heap.Fix(pq, item.index)
}
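// smallestFirstSketch is an illustrative, unused helper showing how the queue
// above is meant to be driven through container/heap: bitmaps come back in
// order of increasing serialized size.
func smallestFirstSketch(bitmaps []*Bitmap) []*Bitmap {
	pq := make(priorityQueue, 0, len(bitmaps))
	for _, bm := range bitmaps {
		pq = append(pq, &item{value: bm})
	}
	heap.Init(&pq) // establish the heap invariant over the appended items
	out := make([]*Bitmap, 0, len(bitmaps))
	for pq.Len() > 0 {
		out = append(out, heap.Pop(&pq).(*item).value)
	}
	return out
}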
/////////////
// The containerPriorityQueue is used to keep the containers of various Bitmaps sorted.
////////////
type containeritem struct {
value *Bitmap
keyindex int
index int
}
type containerPriorityQueue []*containeritem
func (pq containerPriorityQueue) Len() int { return len(pq) }
func (pq containerPriorityQueue) Less(i, j int) bool {
k1 := pq[i].value.highlowcontainer.getKeyAtIndex(pq[i].keyindex)
k2 := pq[j].value.highlowcontainer.getKeyAtIndex(pq[j].keyindex)
if k1 != k2 {
return k1 < k2
}
c1 := pq[i].value.highlowcontainer.getContainerAtIndex(pq[i].keyindex)
c2 := pq[j].value.highlowcontainer.getContainerAtIndex(pq[j].keyindex)
return c1.getCardinality() > c2.getCardinality()
}
func (pq containerPriorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
}
func (pq *containerPriorityQueue) Push(x interface{}) {
n := len(*pq)
item := x.(*containeritem)
item.index = n
*pq = append(*pq, item)
}
func (pq *containerPriorityQueue) Pop() interface{} {
old := *pq
n := len(old)
item := old[n-1]
item.index = -1 // for safety
*pq = old[0 : n-1]
return item
}
//func (pq *containerPriorityQueue) update(item *containeritem, value *Bitmap, keyindex int) {
// item.value = value
// item.keyindex = keyindex
// heap.Fix(pq, item.index)
//}

1667
vendor/github.com/RoaringBitmap/roaring/rle.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1747
vendor/github.com/RoaringBitmap/roaring/rle16.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1126
vendor/github.com/RoaringBitmap/roaring/rle16_gen.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1118
vendor/github.com/RoaringBitmap/roaring/rle_gen.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

163
vendor/github.com/RoaringBitmap/roaring/rlecommon.go generated vendored Normal file
View File

@ -0,0 +1,163 @@
package roaring
import (
"fmt"
)
// common to rle32.go and rle16.go
// rleVerbose controls whether the diagnostic output from p() shows up.
// The testing package sets this based on
// testing.Verbose().
var rleVerbose bool
// p is a shorthand for fmt.Printf with leading and
// trailing newlines. p() makes it easy
// to add diagnostic print statements.
func p(format string, args ...interface{}) {
if rleVerbose {
fmt.Printf("\n"+format+"\n", args...)
}
}
// MaxUint32 is the largest uint32 value.
const MaxUint32 = 4294967295
// MaxUint16 is the largest 16 bit unsigned int.
// This is the largest value an interval16 can store.
const MaxUint16 = 65535
// searchOptions allows us to accelerate runContainer32.search with
// prior knowledge of (mostly lower) bounds. This is used by Union
// and Intersect.
type searchOptions struct {
// start here instead of at 0
startIndex int64
// upper bound instead of len(rc.iv);
// endxIndex == 0 means ignore the bound and use
// endxIndex == n == len(rc.iv), which is also
// naturally the default for search()
// when opt == nil.
endxIndex int64
}
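// Illustrative use (a sketch; search itself belongs to the run containers):
// a caller that already knows the answer lies at or after interval index i
// can bound a later lookup,
//
//	w, found, _ := rc.search(int64(x), &searchOptions{startIndex: i})
//
// instead of passing nil and scanning from index 0.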
// And finds the intersection of rc and b.
func (rc *runContainer32) And(b *Bitmap) *Bitmap {
out := NewBitmap()
for _, p := range rc.iv {
for i := p.start; i <= p.last; i++ {
if b.Contains(i) {
out.Add(i)
}
}
}
return out
}
// Xor returns the exclusive-or of rc and b.
func (rc *runContainer32) Xor(b *Bitmap) *Bitmap {
out := b.Clone()
for _, p := range rc.iv {
for v := p.start; v <= p.last; v++ {
if out.Contains(v) {
out.RemoveRange(uint64(v), uint64(v+1))
} else {
out.Add(v)
}
}
}
return out
}
// Or returns the union of rc and b.
func (rc *runContainer32) Or(b *Bitmap) *Bitmap {
out := b.Clone()
for _, p := range rc.iv {
for v := p.start; v <= p.last; v++ {
out.Add(v)
}
}
return out
}
// trial is used in the randomized testing of runContainers
type trial struct {
n int
percentFill float64
ntrial int
// only in the union test
// only subtract test
percentDelete float64
// only in 067 randomized operations
// we do this + 1 passes
numRandomOpsPass int
// allow sampling range control
// only recent tests respect this.
srang *interval16
}
// And finds the intersection of rc and b.
func (rc *runContainer16) And(b *Bitmap) *Bitmap {
out := NewBitmap()
for _, p := range rc.iv {
plast := p.last()
for i := p.start; i <= plast; i++ {
if b.Contains(uint32(i)) {
out.Add(uint32(i))
}
}
}
return out
}
// Xor returns the exclusive-or of rc and b.
func (rc *runContainer16) Xor(b *Bitmap) *Bitmap {
out := b.Clone()
for _, p := range rc.iv {
plast := p.last()
for v := p.start; v <= plast; v++ {
w := uint32(v)
if out.Contains(w) {
out.RemoveRange(uint64(w), uint64(w+1))
} else {
out.Add(w)
}
}
}
return out
}
// Or returns the union of rc and b.
func (rc *runContainer16) Or(b *Bitmap) *Bitmap {
out := b.Clone()
for _, p := range rc.iv {
plast := p.last()
for v := p.start; v <= plast; v++ {
out.Add(uint32(v))
}
}
return out
}
//func (rc *runContainer32) and(container) container {
// panic("TODO. not yet implemented")
//}
// serializedSizeInBytes returns the number of bytes of memory
// required by this runContainer16. This is for the
// Roaring format, as specified https://github.com/RoaringBitmap/RoaringFormatSpec/
func (rc *runContainer16) serializedSizeInBytes() int {
// number of runs in one uint16, then each run
// needs two more uint16
return 2 + len(rc.iv)*4
}
// serializedSizeInBytes returns the number of bytes of memory
// required by this runContainer32.
func (rc *runContainer32) serializedSizeInBytes() int {
return 4 + len(rc.iv)*8
}
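// For example, 3 runs serialize to 2 + 3*4 = 14 bytes as a runContainer16 and
// to 4 + 3*8 = 28 bytes as a runContainer32.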

695
vendor/github.com/RoaringBitmap/roaring/rlei.go generated vendored Normal file
View File

@ -0,0 +1,695 @@
package roaring
///////////////////////////////////////////////////
//
// container interface methods for runContainer16
//
///////////////////////////////////////////////////
import (
"fmt"
)
// compile time verify we meet interface requirements
var _ container = &runContainer16{}
func (rc *runContainer16) clone() container {
return newRunContainer16CopyIv(rc.iv)
}
func (rc *runContainer16) minimum() uint16 {
return rc.iv[0].start // assume not empty
}
func (rc *runContainer16) maximum() uint16 {
return rc.iv[len(rc.iv)-1].last() // assume not empty
}
func (rc *runContainer16) isFull() bool {
return (len(rc.iv) == 1) && ((rc.iv[0].start == 0) && (rc.iv[0].last() == MaxUint16))
}
func (rc *runContainer16) and(a container) container {
if rc.isFull() {
return a.clone()
}
switch c := a.(type) {
case *runContainer16:
return rc.intersect(c)
case *arrayContainer:
return rc.andArray(c)
case *bitmapContainer:
return rc.andBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) andCardinality(a container) int {
switch c := a.(type) {
case *runContainer16:
return int(rc.intersectCardinality(c))
case *arrayContainer:
return rc.andArrayCardinality(c)
case *bitmapContainer:
return rc.andBitmapContainerCardinality(c)
}
panic("unsupported container type")
}
// andBitmapContainer finds the intersection of rc and b.
func (rc *runContainer16) andBitmapContainer(bc *bitmapContainer) container {
bc2 := newBitmapContainerFromRun(rc)
return bc2.andBitmap(bc)
}
func (rc *runContainer16) andArrayCardinality(ac *arrayContainer) int {
pos := 0
answer := 0
maxpos := ac.getCardinality()
if maxpos == 0 {
return 0 // won't happen in actual code
}
v := ac.content[pos]
mainloop:
for _, p := range rc.iv {
for v < p.start {
pos++
if pos == maxpos {
break mainloop
}
v = ac.content[pos]
}
for v <= p.last() {
answer++
pos++
if pos == maxpos {
break mainloop
}
v = ac.content[pos]
}
}
return answer
}
func (rc *runContainer16) iand(a container) container {
if rc.isFull() {
return a.clone()
}
switch c := a.(type) {
case *runContainer16:
return rc.inplaceIntersect(c)
case *arrayContainer:
return rc.andArray(c)
case *bitmapContainer:
return rc.iandBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) inplaceIntersect(rc2 *runContainer16) container {
// TODO: optimize by doing less allocation, possibly?
// sect will be new
sect := rc.intersect(rc2)
*rc = *sect
return rc
}
func (rc *runContainer16) iandBitmapContainer(bc *bitmapContainer) container {
isect := rc.andBitmapContainer(bc)
*rc = *newRunContainer16FromContainer(isect)
return rc
}
func (rc *runContainer16) andArray(ac *arrayContainer) container {
if len(rc.iv) == 0 {
return newArrayContainer()
}
acCardinality := ac.getCardinality()
c := newArrayContainerCapacity(acCardinality)
for rlePos, arrayPos := 0, 0; arrayPos < acCardinality; {
iv := rc.iv[rlePos]
arrayVal := ac.content[arrayPos]
for iv.last() < arrayVal {
rlePos++
if rlePos == len(rc.iv) {
return c
}
iv = rc.iv[rlePos]
}
if iv.start > arrayVal {
arrayPos = advanceUntil(ac.content, arrayPos, len(ac.content), iv.start)
} else {
c.content = append(c.content, arrayVal)
arrayPos++
}
}
return c
}
func (rc *runContainer16) andNot(a container) container {
switch c := a.(type) {
case *arrayContainer:
return rc.andNotArray(c)
case *bitmapContainer:
return rc.andNotBitmap(c)
case *runContainer16:
return rc.andNotRunContainer16(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) fillLeastSignificant16bits(x []uint32, i int, mask uint32) {
k := 0
var val int64
for _, p := range rc.iv {
n := p.runlen()
for j := int64(0); j < n; j++ {
val = int64(p.start) + j
x[k+i] = uint32(val) | mask
k++
}
}
}
func (rc *runContainer16) getShortIterator() shortIterable {
return rc.newRunIterator16()
}
func (rc *runContainer16) getManyIterator() manyIterable {
return rc.newManyRunIterator16()
}
// add the values in the range [firstOfRange, endx). endx
// is still able to express 2^16 because it is an int, not a uint16.
func (rc *runContainer16) iaddRange(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange", endx))
}
addme := newRunContainer16TakeOwnership([]interval16{
{
start: uint16(firstOfRange),
length: uint16(endx - 1 - firstOfRange),
},
})
*rc = *rc.union(addme)
return rc
}
// remove the values in the range [firstOfRange,endx)
func (rc *runContainer16) iremoveRange(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("request to iremove empty set [%v, %v),"+
" nothing to do.", firstOfRange, endx))
//return rc
}
x := newInterval16Range(uint16(firstOfRange), uint16(endx-1))
rc.isubtract(x)
return rc
}
// not flips the values in the range [firstOfRange,endx)
func (rc *runContainer16) not(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange))
}
return rc.Not(firstOfRange, endx)
}
// Not flips the values in the range [firstOfRange,endx).
// This is not inplace. Only the returned value has the flipped bits.
//
// Currently implemented as (!A intersect B) union (A minus B),
// where A is rc, and B is the supplied [firstOfRange, endx) interval.
//
// TODO(time optimization): convert this to a single pass
// algorithm by copying AndNotRunContainer16() and modifying it.
// Current routine is correct but
// makes 2 more passes through the arrays than should be
// strictly necessary. Measure both ways though--this may not matter.
//
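// For example, with rc = {3,4,5,6}, rc.Not(5, 8) drops 5 and 6 and adds 7,
// yielding {3,4,7}: (!A intersect B) = {7} and (A minus B) = {3,4}.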
func (rc *runContainer16) Not(firstOfRange, endx int) *runContainer16 {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange == %v", endx, firstOfRange))
}
if firstOfRange >= endx {
return rc.Clone()
}
a := rc
// algo:
// (!A intersect B) union (A minus B)
nota := a.invert()
bs := []interval16{newInterval16Range(uint16(firstOfRange), uint16(endx-1))}
b := newRunContainer16TakeOwnership(bs)
notAintersectB := nota.intersect(b)
aMinusB := a.AndNotRunContainer16(b)
rc2 := notAintersectB.union(aMinusB)
return rc2
}
// equals is now logical equals; it does not require the
// same underlying container type.
func (rc *runContainer16) equals(o container) bool {
srb, ok := o.(*runContainer16)
if !ok {
// maybe value instead of pointer
val, valok := o.(*runContainer16)
if valok {
srb = val
ok = true
}
}
if ok {
// Check if the containers are the same object.
if rc == srb {
return true
}
if len(srb.iv) != len(rc.iv) {
return false
}
for i, v := range rc.iv {
if v != srb.iv[i] {
return false
}
}
return true
}
// use generic comparison
if o.getCardinality() != rc.getCardinality() {
return false
}
rit := rc.getShortIterator()
bit := o.getShortIterator()
//k := 0
for rit.hasNext() {
if bit.next() != rit.next() {
return false
}
//k++
}
return true
}
func (rc *runContainer16) iaddReturnMinimized(x uint16) container {
rc.Add(x)
return rc
}
func (rc *runContainer16) iadd(x uint16) (wasNew bool) {
return rc.Add(x)
}
func (rc *runContainer16) iremoveReturnMinimized(x uint16) container {
rc.removeKey(x)
return rc
}
func (rc *runContainer16) iremove(x uint16) bool {
return rc.removeKey(x)
}
func (rc *runContainer16) or(a container) container {
if rc.isFull() {
return rc.clone()
}
switch c := a.(type) {
case *runContainer16:
return rc.union(c)
case *arrayContainer:
return rc.orArray(c)
case *bitmapContainer:
return rc.orBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) orCardinality(a container) int {
switch c := a.(type) {
case *runContainer16:
return int(rc.unionCardinality(c))
case *arrayContainer:
return rc.orArrayCardinality(c)
case *bitmapContainer:
return rc.orBitmapContainerCardinality(c)
}
panic("unsupported container type")
}
// orBitmapContainer finds the union of rc and bc.
func (rc *runContainer16) orBitmapContainer(bc *bitmapContainer) container {
bc2 := newBitmapContainerFromRun(rc)
return bc2.iorBitmap(bc)
}
func (rc *runContainer16) andBitmapContainerCardinality(bc *bitmapContainer) int {
answer := 0
for i := range rc.iv {
answer += bc.getCardinalityInRange(uint(rc.iv[i].start), uint(rc.iv[i].last())+1)
}
//bc.computeCardinality()
return answer
}
func (rc *runContainer16) orBitmapContainerCardinality(bc *bitmapContainer) int {
return rc.getCardinality() + bc.getCardinality() - rc.andBitmapContainerCardinality(bc)
}
// orArray finds the union of rc and ac.
func (rc *runContainer16) orArray(ac *arrayContainer) container {
bc1 := newBitmapContainerFromRun(rc)
bc2 := ac.toBitmapContainer()
return bc1.orBitmap(bc2)
}
// orArrayCardinality returns the cardinality of the union of rc and ac.
func (rc *runContainer16) orArrayCardinality(ac *arrayContainer) int {
return ac.getCardinality() + rc.getCardinality() - rc.andArrayCardinality(ac)
}
func (rc *runContainer16) ior(a container) container {
if rc.isFull() {
return rc
}
switch c := a.(type) {
case *runContainer16:
return rc.inplaceUnion(c)
case *arrayContainer:
return rc.iorArray(c)
case *bitmapContainer:
return rc.iorBitmapContainer(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) inplaceUnion(rc2 *runContainer16) container {
p("rc.inplaceUnion with len(rc2.iv)=%v", len(rc2.iv))
for _, p := range rc2.iv {
last := int64(p.last())
for i := int64(p.start); i <= last; i++ {
rc.Add(uint16(i))
}
}
return rc
}
func (rc *runContainer16) iorBitmapContainer(bc *bitmapContainer) container {
it := bc.getShortIterator()
for it.hasNext() {
rc.Add(it.next())
}
return rc
}
func (rc *runContainer16) iorArray(ac *arrayContainer) container {
it := ac.getShortIterator()
for it.hasNext() {
rc.Add(it.next())
}
return rc
}
// lazyIOR is described (not yet implemented) in
// this nice note from @lemire on
// https://github.com/RoaringBitmap/roaring/pull/70#issuecomment-263613737
//
// Description of lazyOR and lazyIOR from @lemire:
//
// Lazy functions are optional and can be simply
// wrapper around non-lazy functions.
//
// The idea of "laziness" is as follows. It is
// inspired by the concept of lazy evaluation
// you might be familiar with (functional programming
// and all that). So a roaring bitmap is
// such that all its containers are, in some
// sense, chosen to use as little memory as
// possible. This is nice. Also, all bitsets
// are "cardinality aware" so that you can do
// fast rank/select queries, or query the
// cardinality of the whole bitmap... very fast,
// without latency.
//
// However, imagine that you are aggregating 100
// bitmaps together. So you OR the first two, then OR
// that with the third one and so forth. Clearly,
// intermediate bitmaps don't need to be as
// compressed as possible, right? They can be
// in a "dirty state". You only need the end
// result to be in a nice state... which you
// can achieve by calling repairAfterLazy at the end.
//
// The Java/C code does something special for
// the in-place lazy OR runs. The idea is that
// instead of taking two run containers and
// generating a new one, we actually try to
// do the computation in-place through a
// technique invented by @gssiyankai (pinging him!).
// What you do is you check whether the host
// run container has lots of extra capacity.
// If it does, you move its data at the end of
// the backing array, and then you write
// the answer at the beginning. What this
// trick does is minimize memory allocations.
//
func (rc *runContainer16) lazyIOR(a container) container {
// not lazy at the moment
// TODO: make it lazy
return rc.ior(a)
/*
switch c := a.(type) {
case *arrayContainer:
return rc.lazyIorArray(c)
case *bitmapContainer:
return rc.lazyIorBitmap(c)
case *runContainer16:
return rc.lazyIorRun16(c)
}
panic("unsupported container type")
*/
}
// lazyOR is described above in lazyIOR.
func (rc *runContainer16) lazyOR(a container) container {
// not lazy at the moment
// TODO: make it lazy
return rc.or(a)
/*
switch c := a.(type) {
case *arrayContainer:
return rc.lazyOrArray(c)
case *bitmapContainer:
return rc.lazyOrBitmap(c)
case *runContainer16:
return rc.lazyOrRunContainer16(c)
}
panic("unsupported container type")
*/
}
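// A sketch of the aggregation pattern described above (the helper names are
// the Bitmap-level lazy wrappers the note refers to, not methods of this
// container):
//
//	acc := NewBitmap()
//	for _, b := range manyBitmaps {
//		acc = acc.lazyOR(b) // intermediate containers may stay "dirty"
//	}
//	acc.repairAfterLazy() // normalize representations once at the end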
func (rc *runContainer16) intersects(a container) bool {
// TODO: optimize by doing inplace/less allocation, possibly?
isect := rc.and(a)
return isect.getCardinality() > 0
}
func (rc *runContainer16) xor(a container) container {
switch c := a.(type) {
case *arrayContainer:
return rc.xorArray(c)
case *bitmapContainer:
return rc.xorBitmap(c)
case *runContainer16:
return rc.xorRunContainer16(c)
}
panic("unsupported container type")
}
func (rc *runContainer16) iandNot(a container) container {
switch c := a.(type) {
case *arrayContainer:
return rc.iandNotArray(c)
case *bitmapContainer:
return rc.iandNotBitmap(c)
case *runContainer16:
return rc.iandNotRunContainer16(c)
}
panic("unsupported container type")
}
// flip the values in the range [firstOfRange,endx)
func (rc *runContainer16) inot(firstOfRange, endx int) container {
if firstOfRange >= endx {
panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange))
}
// TODO: minimize copies, do it all inplace; not() makes a copy.
rc = rc.Not(firstOfRange, endx)
return rc
}
func (rc *runContainer16) getCardinality() int {
return int(rc.cardinality())
}
func (rc *runContainer16) rank(x uint16) int {
n := int64(len(rc.iv))
xx := int64(x)
w, already, _ := rc.search(xx, nil)
if w < 0 {
return 0
}
if !already && w == n-1 {
return rc.getCardinality()
}
var rnk int64
if !already {
for i := int64(0); i <= w; i++ {
rnk += rc.iv[i].runlen()
}
return int(rnk)
}
for i := int64(0); i < w; i++ {
rnk += rc.iv[i].runlen()
}
rnk += int64(x-rc.iv[w].start) + 1
return int(rnk)
}
func (rc *runContainer16) selectInt(x uint16) int {
return rc.selectInt16(x)
}
func (rc *runContainer16) andNotRunContainer16(b *runContainer16) container {
return rc.AndNotRunContainer16(b)
}
func (rc *runContainer16) andNotArray(ac *arrayContainer) container {
rcb := rc.toBitmapContainer()
acb := ac.toBitmapContainer()
return rcb.andNotBitmap(acb)
}
func (rc *runContainer16) andNotBitmap(bc *bitmapContainer) container {
rcb := rc.toBitmapContainer()
return rcb.andNotBitmap(bc)
}
func (rc *runContainer16) toBitmapContainer() *bitmapContainer {
p("run16 toBitmap starting; rc has %v ranges", len(rc.iv))
bc := newBitmapContainer()
for i := range rc.iv {
bc.iaddRange(int(rc.iv[i].start), int(rc.iv[i].last())+1)
}
bc.computeCardinality()
return bc
}
func (rc *runContainer16) iandNotRunContainer16(x2 *runContainer16) container {
rcb := rc.toBitmapContainer()
x2b := x2.toBitmapContainer()
rcb.iandNotBitmapSurely(x2b)
// TODO: check size and optimize the return value
// TODO: is inplace modification really required? If not, elide the copy.
rc2 := newRunContainer16FromBitmapContainer(rcb)
*rc = *rc2
return rc
}
func (rc *runContainer16) iandNotArray(ac *arrayContainer) container {
rcb := rc.toBitmapContainer()
acb := ac.toBitmapContainer()
rcb.iandNotBitmapSurely(acb)
// TODO: check size and optimize the return value
// TODO: is inplace modification really required? If not, elide the copy.
rc2 := newRunContainer16FromBitmapContainer(rcb)
*rc = *rc2
return rc
}
func (rc *runContainer16) iandNotBitmap(bc *bitmapContainer) container {
rcb := rc.toBitmapContainer()
rcb.iandNotBitmapSurely(bc)
// TODO: check size and optimize the return value
// TODO: is inplace modification really required? If not, elide the copy.
rc2 := newRunContainer16FromBitmapContainer(rcb)
*rc = *rc2
return rc
}
func (rc *runContainer16) xorRunContainer16(x2 *runContainer16) container {
rcb := rc.toBitmapContainer()
x2b := x2.toBitmapContainer()
return rcb.xorBitmap(x2b)
}
func (rc *runContainer16) xorArray(ac *arrayContainer) container {
rcb := rc.toBitmapContainer()
acb := ac.toBitmapContainer()
return rcb.xorBitmap(acb)
}
func (rc *runContainer16) xorBitmap(bc *bitmapContainer) container {
rcb := rc.toBitmapContainer()
return rcb.xorBitmap(bc)
}
// convert to bitmap or array *if needed*
func (rc *runContainer16) toEfficientContainer() container {
// runContainer16SerializedSizeInBytes(numRuns)
sizeAsRunContainer := rc.getSizeInBytes()
sizeAsBitmapContainer := bitmapContainerSizeInBytes()
card := int(rc.cardinality())
sizeAsArrayContainer := arrayContainerSizeInBytes(card)
if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) {
return rc
}
if card <= arrayDefaultMaxSize {
return rc.toArrayContainer()
}
bc := newBitmapContainerFromRun(rc)
return bc
}
func (rc *runContainer16) toArrayContainer() *arrayContainer {
ac := newArrayContainer()
for i := range rc.iv {
ac.iaddRange(int(rc.iv[i].start), int(rc.iv[i].last())+1)
}
return ac
}
func newRunContainer16FromContainer(c container) *runContainer16 {
switch x := c.(type) {
case *runContainer16:
return x.Clone()
case *arrayContainer:
return newRunContainer16FromArray(x)
case *bitmapContainer:
return newRunContainer16FromBitmapContainer(x)
}
panic("unsupported container type")
}

1345
vendor/github.com/RoaringBitmap/roaring/roaring.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

893
vendor/github.com/RoaringBitmap/roaring/roaringarray.go generated vendored Normal file
View File

@ -0,0 +1,893 @@
package roaring
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
snappy "github.com/glycerine/go-unsnap-stream"
"github.com/tinylib/msgp/msgp"
)
//go:generate msgp -unexported
type container interface {
clone() container
and(container) container
andCardinality(container) int
iand(container) container // i stands for inplace
andNot(container) container
iandNot(container) container // i stands for inplace
getCardinality() int
// rank returns the number of integers that are
// smaller or equal to x. rank(infinity) would be getCardinality().
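// e.g. for a container holding {2, 4, 7}: rank(4) == 2 and rank(1) == 0.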
rank(uint16) int
iadd(x uint16) bool // inplace, returns true if x was new.
iaddReturnMinimized(uint16) container // may change return type to minimize storage.
//addRange(start, final int) container // range is [firstOfRange,lastOfRange) (unused)
iaddRange(start, endx int) container // i stands for inplace, range is [firstOfRange,endx)
iremove(x uint16) bool // inplace, returns true if x was present.
iremoveReturnMinimized(uint16) container // may change return type to minimize storage.
not(start, final int) container // range is [firstOfRange,lastOfRange)
inot(firstOfRange, endx int) container // i stands for inplace, range is [firstOfRange,endx)
xor(r container) container
getShortIterator() shortIterable
getManyIterator() manyIterable
contains(i uint16) bool
maximum() uint16
minimum() uint16
// equals is now logical equals; it does not require the
// same underlying container types, but compares across
// any of the implementations.
equals(r container) bool
fillLeastSignificant16bits(array []uint32, i int, mask uint32)
or(r container) container
orCardinality(r container) int
isFull() bool
ior(r container) container // i stands for inplace
intersects(r container) bool // whether the two containers intersect
lazyOR(r container) container
lazyIOR(r container) container
getSizeInBytes() int
//removeRange(start, final int) container // range is [firstOfRange,lastOfRange) (unused)
iremoveRange(start, final int) container // i stands for inplace, range is [firstOfRange,lastOfRange)
selectInt(x uint16) int // selectInt returns the xth integer in the container
serializedSizeInBytes() int
readFrom(io.Reader) (int, error)
writeTo(io.Writer) (int, error)
numberOfRuns() int
toEfficientContainer() container
String() string
containerType() contype
}
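// toUint16SliceSketch is an illustrative, unused helper: any of the container
// implementations can be walked uniformly through the interface above via its
// short iterator.
func toUint16SliceSketch(c container) []uint16 {
	out := make([]uint16, 0, c.getCardinality())
	it := c.getShortIterator()
	for it.hasNext() {
		out = append(out, it.next())
	}
	return out
}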
type contype uint8
const (
bitmapContype contype = iota
arrayContype
run16Contype
run32Contype
)
// careful: range is [firstOfRange,lastOfRange]
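// e.g. rangeOfOnes(10, 12) yields a run container holding {10, 11, 12}.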
func rangeOfOnes(start, last int) container {
if start > MaxUint16 {
panic("rangeOfOnes called with start > MaxUint16")
}
if last > MaxUint16 {
panic("rangeOfOnes called with last > MaxUint16")
}
if start < 0 {
panic("rangeOfOnes called with start < 0")
}
if last < 0 {
panic("rangeOfOnes called with last < 0")
}
return newRunContainer16Range(uint16(start), uint16(last))
}
type roaringArray struct {
keys []uint16
containers []container `msg:"-"` // don't try to serialize directly.
needCopyOnWrite []bool
copyOnWrite bool
// conserz is used at serialization time
// to serialize containers. Otherwise empty.
conserz []containerSerz
}
// containerSerz facilitates serializing container (tricky to
// serialize because it is an interface) by providing a
// light wrapper with a type identifier.
type containerSerz struct {
t contype `msg:"t"` // type
r msgp.Raw `msg:"r"` // Raw msgpack of the actual container type
}
func newRoaringArray() *roaringArray {
return &roaringArray{}
}
// runOptimize compresses the element containers to minimize space consumed.
// Q: how does this interact with copyOnWrite and needCopyOnWrite?
// A: since we aren't changing the logical content, just the representation,
// we don't bother to check the needCopyOnWrite bits. We replace
// (possibly all) elements of ra.containers in-place with space
// optimized versions.
func (ra *roaringArray) runOptimize() {
for i := range ra.containers {
ra.containers[i] = ra.containers[i].toEfficientContainer()
}
}
func (ra *roaringArray) appendContainer(key uint16, value container, mustCopyOnWrite bool) {
ra.keys = append(ra.keys, key)
ra.containers = append(ra.containers, value)
ra.needCopyOnWrite = append(ra.needCopyOnWrite, mustCopyOnWrite)
}
func (ra *roaringArray) appendWithoutCopy(sa roaringArray, startingindex int) {
mustCopyOnWrite := sa.needCopyOnWrite[startingindex]
ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex], mustCopyOnWrite)
}
func (ra *roaringArray) appendCopy(sa roaringArray, startingindex int) {
// copy-on-write only if both sides request it, or if we already have a lightweight copy
copyonwrite := (ra.copyOnWrite && sa.copyOnWrite) || sa.needsCopyOnWrite(startingindex)
if !copyonwrite {
// since there is no copy-on-write, we need to clone the container (this is important)
ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex].clone(), copyonwrite)
} else {
ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex], copyonwrite)
if !sa.needsCopyOnWrite(startingindex) {
sa.setNeedsCopyOnWrite(startingindex)
}
}
}
func (ra *roaringArray) appendWithoutCopyMany(sa roaringArray, startingindex, end int) {
for i := startingindex; i < end; i++ {
ra.appendWithoutCopy(sa, i)
}
}
func (ra *roaringArray) appendCopyMany(sa roaringArray, startingindex, end int) {
for i := startingindex; i < end; i++ {
ra.appendCopy(sa, i)
}
}
func (ra *roaringArray) appendCopiesUntil(sa roaringArray, stoppingKey uint16) {
// copy-on-write only if both sides request it, or if we already have a lightweight copy
copyonwrite := ra.copyOnWrite && sa.copyOnWrite
for i := 0; i < sa.size(); i++ {
if sa.keys[i] >= stoppingKey {
break
}
thiscopyonewrite := copyonwrite || sa.needsCopyOnWrite(i)
if thiscopyonewrite {
ra.appendContainer(sa.keys[i], sa.containers[i], thiscopyonewrite)
if !sa.needsCopyOnWrite(i) {
sa.setNeedsCopyOnWrite(i)
}
} else {
// since there is no copy-on-write, we need to clone the container (this is important)
ra.appendContainer(sa.keys[i], sa.containers[i].clone(), thiscopyonewrite)
}
}
}
func (ra *roaringArray) appendCopiesAfter(sa roaringArray, beforeStart uint16) {
// copy-on-write only if both sides request it, or if we already have a lightweight copy
copyonwrite := ra.copyOnWrite && sa.copyOnWrite
startLocation := sa.getIndex(beforeStart)
if startLocation >= 0 {
startLocation++
} else {
startLocation = -startLocation - 1
}
for i := startLocation; i < sa.size(); i++ {
thiscopyonewrite := copyonwrite || sa.needsCopyOnWrite(i)
if thiscopyonewrite {
ra.appendContainer(sa.keys[i], sa.containers[i], thiscopyonewrite)
if !sa.needsCopyOnWrite(i) {
sa.setNeedsCopyOnWrite(i)
}
} else {
// since there is no copy-on-write, we need to clone the container (this is important)
ra.appendContainer(sa.keys[i], sa.containers[i].clone(), thiscopyonewrite)
}
}
}
func (ra *roaringArray) removeIndexRange(begin, end int) {
if end <= begin {
return
}
r := end - begin
copy(ra.keys[begin:], ra.keys[end:])
copy(ra.containers[begin:], ra.containers[end:])
copy(ra.needCopyOnWrite[begin:], ra.needCopyOnWrite[end:])
ra.resize(len(ra.keys) - r)
}
func (ra *roaringArray) resize(newsize int) {
for k := newsize; k < len(ra.containers); k++ {
ra.containers[k] = nil
}
ra.keys = ra.keys[:newsize]
ra.containers = ra.containers[:newsize]
ra.needCopyOnWrite = ra.needCopyOnWrite[:newsize]
}
func (ra *roaringArray) clear() {
ra.resize(0)
ra.copyOnWrite = false
ra.conserz = nil
}
func (ra *roaringArray) clone() *roaringArray {
sa := roaringArray{}
sa.copyOnWrite = ra.copyOnWrite
// this is where copyOnWrite is used.
if ra.copyOnWrite {
sa.keys = make([]uint16, len(ra.keys))
copy(sa.keys, ra.keys)
sa.containers = make([]container, len(ra.containers))
copy(sa.containers, ra.containers)
sa.needCopyOnWrite = make([]bool, len(ra.needCopyOnWrite))
ra.markAllAsNeedingCopyOnWrite()
sa.markAllAsNeedingCopyOnWrite()
// the containers themselves are shared; both arrays are now marked as needing copy-on-write
} else {
// make a full copy
sa.keys = make([]uint16, len(ra.keys))
copy(sa.keys, ra.keys)
sa.containers = make([]container, len(ra.containers))
for i := range sa.containers {
sa.containers[i] = ra.containers[i].clone()
}
sa.needCopyOnWrite = make([]bool, len(ra.needCopyOnWrite))
}
return &sa
}
// unused function:
//func (ra *roaringArray) containsKey(x uint16) bool {
// return (ra.binarySearch(0, int64(len(ra.keys)), x) >= 0)
//}
func (ra *roaringArray) getContainer(x uint16) container {
i := ra.binarySearch(0, int64(len(ra.keys)), x)
if i < 0 {
return nil
}
return ra.containers[i]
}
func (ra *roaringArray) getContainerAtIndex(i int) container {
return ra.containers[i]
}
func (ra *roaringArray) getFastContainerAtIndex(i int, needsWriteable bool) container {
c := ra.getContainerAtIndex(i)
switch t := c.(type) {
case *arrayContainer:
c = t.toBitmapContainer()
case *runContainer16:
if !t.isFull() {
c = t.toBitmapContainer()
}
case *bitmapContainer:
if needsWriteable && ra.needCopyOnWrite[i] {
c = ra.containers[i].clone()
}
}
return c
}
func (ra *roaringArray) getWritableContainerAtIndex(i int) container {
if ra.needCopyOnWrite[i] {
ra.containers[i] = ra.containers[i].clone()
ra.needCopyOnWrite[i] = false
}
return ra.containers[i]
}
func (ra *roaringArray) getIndex(x uint16) int {
// before the binary search, we optimize for frequent cases
size := len(ra.keys)
if (size == 0) || (ra.keys[size-1] == x) {
return size - 1
}
return ra.binarySearch(0, int64(size), x)
}
func (ra *roaringArray) getKeyAtIndex(i int) uint16 {
return ra.keys[i]
}
func (ra *roaringArray) insertNewKeyValueAt(i int, key uint16, value container) {
ra.keys = append(ra.keys, 0)
ra.containers = append(ra.containers, nil)
copy(ra.keys[i+1:], ra.keys[i:])
copy(ra.containers[i+1:], ra.containers[i:])
ra.keys[i] = key
ra.containers[i] = value
ra.needCopyOnWrite = append(ra.needCopyOnWrite, false)
copy(ra.needCopyOnWrite[i+1:], ra.needCopyOnWrite[i:])
ra.needCopyOnWrite[i] = false
}
func (ra *roaringArray) remove(key uint16) bool {
i := ra.binarySearch(0, int64(len(ra.keys)), key)
if i >= 0 { // the key exists; remove it
ra.removeAtIndex(i)
return true
}
return false
}
func (ra *roaringArray) removeAtIndex(i int) {
copy(ra.keys[i:], ra.keys[i+1:])
copy(ra.containers[i:], ra.containers[i+1:])
copy(ra.needCopyOnWrite[i:], ra.needCopyOnWrite[i+1:])
ra.resize(len(ra.keys) - 1)
}
func (ra *roaringArray) setContainerAtIndex(i int, c container) {
ra.containers[i] = c
}
func (ra *roaringArray) replaceKeyAndContainerAtIndex(i int, key uint16, c container, mustCopyOnWrite bool) {
ra.keys[i] = key
ra.containers[i] = c
ra.needCopyOnWrite[i] = mustCopyOnWrite
}
func (ra *roaringArray) size() int {
return len(ra.keys)
}
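// binarySearch looks for ikey in ra.keys[begin:end) and returns its index, or
// -(insertionPoint + 1) when the key is absent, so callers can recover where
// it would be inserted.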
func (ra *roaringArray) binarySearch(begin, end int64, ikey uint16) int {
low := begin
high := end - 1
for low+16 <= high {
middleIndex := low + (high-low)/2 // avoid overflow
middleValue := ra.keys[middleIndex]
if middleValue < ikey {
low = middleIndex + 1
} else if middleValue > ikey {
high = middleIndex - 1
} else {
return int(middleIndex)
}
}
for ; low <= high; low++ {
val := ra.keys[low]
if val >= ikey {
if val == ikey {
return int(low)
}
break
}
}
return -int(low + 1)
}
func (ra *roaringArray) equals(o interface{}) bool {
srb, ok := o.(roaringArray)
if ok {
if srb.size() != ra.size() {
return false
}
for i, k := range ra.keys {
if k != srb.keys[i] {
return false
}
}
for i, c := range ra.containers {
if !c.equals(srb.containers[i]) {
return false
}
}
return true
}
return false
}
func (ra *roaringArray) headerSize() uint64 {
size := uint64(len(ra.keys))
if ra.hasRunCompression() {
if size < noOffsetThreshold { // for small bitmaps, we omit the offsets
return 4 + (size+7)/8 + 4*size
}
return 4 + (size+7)/8 + 8*size // - 4 because we pack the size with the cookie
}
return 4 + 4 + 8*size
}
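// For example, with 3 containers the header costs 4 + 4 + 8*3 = 32 bytes in
// the no-run-container format, and 4 + (3+7)/8 + 4*3 = 17 bytes in the run
// format when 3 < noOffsetThreshold (offsets omitted).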
// should be dirt cheap
func (ra *roaringArray) serializedSizeInBytes() uint64 {
answer := ra.headerSize()
for _, c := range ra.containers {
answer += uint64(c.serializedSizeInBytes())
}
return answer
}
//
// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
//
func (ra *roaringArray) toBytes() ([]byte, error) {
stream := &bytes.Buffer{}
hasRun := ra.hasRunCompression()
isRunSizeInBytes := 0
cookieSize := 8
if hasRun {
cookieSize = 4
isRunSizeInBytes = (len(ra.keys) + 7) / 8
}
descriptiveHeaderSize := 4 * len(ra.keys)
preambleSize := cookieSize + isRunSizeInBytes + descriptiveHeaderSize
buf := make([]byte, preambleSize+4*len(ra.keys))
nw := 0
if hasRun {
binary.LittleEndian.PutUint16(buf[0:], uint16(serialCookie))
nw += 2
binary.LittleEndian.PutUint16(buf[2:], uint16(len(ra.keys)-1))
nw += 2
// compute isRun bitmap
var ir []byte
isRun := newBitmapContainer()
for i, c := range ra.containers {
switch c.(type) {
case *runContainer16:
isRun.iadd(uint16(i))
}
}
// convert to little endian
ir = isRun.asLittleEndianByteSlice()[:isRunSizeInBytes]
nw += copy(buf[nw:], ir)
} else {
binary.LittleEndian.PutUint32(buf[0:], uint32(serialCookieNoRunContainer))
nw += 4
binary.LittleEndian.PutUint32(buf[4:], uint32(len(ra.keys)))
nw += 4
}
// descriptive header
for i, key := range ra.keys {
binary.LittleEndian.PutUint16(buf[nw:], key)
nw += 2
c := ra.containers[i]
binary.LittleEndian.PutUint16(buf[nw:], uint16(c.getCardinality()-1))
nw += 2
}
startOffset := int64(preambleSize + 4*len(ra.keys))
if !hasRun || (len(ra.keys) >= noOffsetThreshold) {
// offset header
for _, c := range ra.containers {
binary.LittleEndian.PutUint32(buf[nw:], uint32(startOffset))
nw += 4
switch rc := c.(type) {
case *runContainer16:
startOffset += 2 + int64(len(rc.iv))*4
default:
startOffset += int64(getSizeInBytesFromCardinality(c.getCardinality()))
}
}
}
_, err := stream.Write(buf[:nw])
if err != nil {
return nil, err
}
for i, c := range ra.containers {
_ = i
_, err := c.writeTo(stream)
if err != nil {
return nil, err
}
}
return stream.Bytes(), nil
}
//
// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
//
func (ra *roaringArray) writeTo(out io.Writer) (int64, error) {
by, err := ra.toBytes()
if err != nil {
return 0, err
}
n, err := out.Write(by)
if err == nil && n < len(by) {
err = io.ErrShortWrite
}
return int64(n), err
}
func (ra *roaringArray) fromBuffer(buf []byte) (int64, error) {
pos := 0
if len(buf) < 8 {
return 0, fmt.Errorf("buffer too small, expecting at least 8 bytes, was %d", len(buf))
}
cookie := binary.LittleEndian.Uint32(buf)
pos += 4
var size uint32 // number of containers
haveRunContainers := false
var isRunBitmap []byte
// cookie header
if cookie&0x0000FFFF == serialCookie {
haveRunContainers = true
size = uint32(uint16(cookie>>16) + 1) // number of containers
// create is-run-container bitmap
isRunBitmapSize := (int(size) + 7) / 8
if pos+isRunBitmapSize > len(buf) {
return 0, fmt.Errorf("malformed bitmap, is-run bitmap overruns buffer at %d", pos+isRunBitmapSize)
}
isRunBitmap = buf[pos : pos+isRunBitmapSize]
pos += isRunBitmapSize
} else if cookie == serialCookieNoRunContainer {
size = binary.LittleEndian.Uint32(buf[pos:])
pos += 4
} else {
return 0, fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header")
}
if size > (1 << 16) {
return 0, fmt.Errorf("It is logically impossible to have more than (1<<16) containers.")
}
// descriptive header
// keycard - is {key, cardinality} tuple slice
if pos+2*2*int(size) > len(buf) {
return 0, fmt.Errorf("malfomred bitmap, key-cardinality slice overruns buffer at %d", pos+2*2*int(size))
}
keycard := byteSliceAsUint16Slice(buf[pos : pos+2*2*int(size)])
pos += 2 * 2 * int(size)
if !haveRunContainers || size >= noOffsetThreshold {
pos += 4 * int(size)
}
// Allocate slices upfront as number of containers is known
if cap(ra.containers) >= int(size) {
ra.containers = ra.containers[:size]
} else {
ra.containers = make([]container, size)
}
if cap(ra.keys) >= int(size) {
ra.keys = ra.keys[:size]
} else {
ra.keys = make([]uint16, size)
}
if cap(ra.needCopyOnWrite) >= int(size) {
ra.needCopyOnWrite = ra.needCopyOnWrite[:size]
} else {
ra.needCopyOnWrite = make([]bool, size)
}
for i := uint32(0); i < size; i++ {
key := uint16(keycard[2*i])
card := int(keycard[2*i+1]) + 1
ra.keys[i] = key
ra.needCopyOnWrite[i] = true
if haveRunContainers && isRunBitmap[i/8]&(1<<(i%8)) != 0 {
// run container
nr := binary.LittleEndian.Uint16(buf[pos:])
pos += 2
if pos+int(nr)*4 > len(buf) {
return 0, fmt.Errorf("malformed bitmap, a run container overruns buffer at %d:%d", pos, pos+int(nr)*4)
}
nb := runContainer16{
iv: byteSliceAsInterval16Slice(buf[pos : pos+int(nr)*4]),
card: int64(card),
}
pos += int(nr) * 4
ra.containers[i] = &nb
} else if card > arrayDefaultMaxSize {
// bitmap container
nb := bitmapContainer{
cardinality: card,
bitmap: byteSliceAsUint64Slice(buf[pos : pos+arrayDefaultMaxSize*2]),
}
pos += arrayDefaultMaxSize * 2
ra.containers[i] = &nb
} else {
// array container
nb := arrayContainer{
byteSliceAsUint16Slice(buf[pos : pos+card*2]),
}
pos += card * 2
ra.containers[i] = &nb
}
}
return int64(pos), nil
}
func (ra *roaringArray) readFrom(stream io.Reader) (int64, error) {
pos := 0
var cookie uint32
err := binary.Read(stream, binary.LittleEndian, &cookie)
if err != nil {
return 0, fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: %s", err)
}
pos += 4
var size uint32
haveRunContainers := false
var isRun *bitmapContainer
if cookie&0x0000FFFF == serialCookie {
haveRunContainers = true
size = uint32(uint16(cookie>>16) + 1)
bytesToRead := (int(size) + 7) / 8
numwords := (bytesToRead + 7) / 8
by := make([]byte, bytesToRead, numwords*8)
nr, err := io.ReadFull(stream, by)
if err != nil {
return 8 + int64(nr), fmt.Errorf("error in readFrom: could not read the "+
"runContainer bit flags of length %v bytes: %v", bytesToRead, err)
}
pos += bytesToRead
by = by[:cap(by)]
isRun = newBitmapContainer()
for i := 0; i < numwords; i++ {
isRun.bitmap[i] = binary.LittleEndian.Uint64(by)
by = by[8:]
}
} else if cookie == serialCookieNoRunContainer {
err = binary.Read(stream, binary.LittleEndian, &size)
if err != nil {
return 0, fmt.Errorf("error in roaringArray.readFrom: when reading size, got: %s", err)
}
pos += 4
} else {
return 0, fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header")
}
if size > (1 << 16) {
return 0, fmt.Errorf("It is logically impossible to have more than (1<<16) containers.")
}
// descriptive header
keycard := make([]uint16, 2*size, 2*size)
err = binary.Read(stream, binary.LittleEndian, keycard)
if err != nil {
return 0, err
}
pos += 2 * 2 * int(size)
// offset header
if !haveRunContainers || size >= noOffsetThreshold {
io.CopyN(ioutil.Discard, stream, 4*int64(size)) // we never skip ahead so this data can be ignored
pos += 4 * int(size)
}
for i := uint32(0); i < size; i++ {
key := int(keycard[2*i])
card := int(keycard[2*i+1]) + 1
if haveRunContainers && isRun.contains(uint16(i)) {
nb := newRunContainer16()
nr, err := nb.readFrom(stream)
if err != nil {
return 0, err
}
pos += nr
ra.appendContainer(uint16(key), nb, false)
} else if card > arrayDefaultMaxSize {
nb := newBitmapContainer()
nr, err := nb.readFrom(stream)
if err != nil {
return 0, err
}
nb.cardinality = card
pos += nr
ra.appendContainer(keycard[2*i], nb, false)
} else {
nb := newArrayContainerSize(card)
nr, err := nb.readFrom(stream)
if err != nil {
return 0, err
}
pos += nr
ra.appendContainer(keycard[2*i], nb, false)
}
}
return int64(pos), nil
}
func (ra *roaringArray) hasRunCompression() bool {
for _, c := range ra.containers {
switch c.(type) {
case *runContainer16:
return true
}
}
return false
}
func (ra *roaringArray) writeToMsgpack(stream io.Writer) error {
ra.conserz = make([]containerSerz, len(ra.containers))
for i, v := range ra.containers {
switch cn := v.(type) {
case *bitmapContainer:
bts, err := cn.MarshalMsg(nil)
if err != nil {
return err
}
ra.conserz[i].t = bitmapContype
ra.conserz[i].r = bts
case *arrayContainer:
bts, err := cn.MarshalMsg(nil)
if err != nil {
return err
}
ra.conserz[i].t = arrayContype
ra.conserz[i].r = bts
case *runContainer16:
bts, err := cn.MarshalMsg(nil)
if err != nil {
return err
}
ra.conserz[i].t = run16Contype
ra.conserz[i].r = bts
default:
panic(fmt.Errorf("Unrecognized container implementation: %T", cn))
}
}
w := snappy.NewWriter(stream)
err := msgp.Encode(w, ra)
ra.conserz = nil
return err
}
func (ra *roaringArray) readFromMsgpack(stream io.Reader) error {
r := snappy.NewReader(stream)
err := msgp.Decode(r, ra)
if err != nil {
return err
}
if len(ra.containers) != len(ra.keys) {
ra.containers = make([]container, len(ra.keys))
}
for i, v := range ra.conserz {
switch v.t {
case bitmapContype:
c := &bitmapContainer{}
_, err = c.UnmarshalMsg(v.r)
if err != nil {
return err
}
ra.containers[i] = c
case arrayContype:
c := &arrayContainer{}
_, err = c.UnmarshalMsg(v.r)
if err != nil {
return err
}
ra.containers[i] = c
case run16Contype:
c := &runContainer16{}
_, err = c.UnmarshalMsg(v.r)
if err != nil {
return err
}
ra.containers[i] = c
default:
return fmt.Errorf("unrecognized contype serialization code: '%v'", v.t)
}
}
ra.conserz = nil
return nil
}
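// advanceUntil returns the index of the first key >= min when scanning from
// position pos+1 onward, using a galloping (doubling) probe followed by a
// binary search; it returns len(ra.keys) when no such key exists.
// e.g. with keys [2, 5, 9, 14, 30], advanceUntil(14, 0) doubles the span from
// 1 to 2 and returns index 3, the position of 14.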
func (ra *roaringArray) advanceUntil(min uint16, pos int) int {
lower := pos + 1
if lower >= len(ra.keys) || ra.keys[lower] >= min {
return lower
}
spansize := 1
for lower+spansize < len(ra.keys) && ra.keys[lower+spansize] < min {
spansize *= 2
}
var upper int
if lower+spansize < len(ra.keys) {
upper = lower + spansize
} else {
upper = len(ra.keys) - 1
}
if ra.keys[upper] == min {
return upper
}
if ra.keys[upper] < min {
// means the array has no item >= min (pos = array.length)
return len(ra.keys)
}
// we know that the next-smallest span was too small
lower += (spansize >> 1)
mid := 0
for lower+1 != upper {
mid = (lower + upper) >> 1
if ra.keys[mid] == min {
return mid
} else if ra.keys[mid] < min {
lower = mid
} else {
upper = mid
}
}
return upper
}
func (ra *roaringArray) markAllAsNeedingCopyOnWrite() {
for i := range ra.needCopyOnWrite {
ra.needCopyOnWrite[i] = true
}
}
func (ra *roaringArray) needsCopyOnWrite(i int) bool {
return ra.needCopyOnWrite[i]
}
func (ra *roaringArray) setNeedsCopyOnWrite(i int) {
ra.needCopyOnWrite[i] = true
}

View File

@ -0,0 +1,529 @@
package roaring
// NOTE: THIS FILE WAS PRODUCED BY THE
// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
// DO NOT EDIT
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *containerSerz) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zxvk uint32
zxvk, err = dc.ReadMapHeader()
if err != nil {
return
}
for zxvk > 0 {
zxvk--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "t":
{
var zbzg uint8
zbzg, err = dc.ReadUint8()
z.t = contype(zbzg)
}
if err != nil {
return
}
case "r":
err = z.r.DecodeMsg(dc)
if err != nil {
return
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *containerSerz) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "t"
err = en.Append(0x82, 0xa1, 0x74)
if err != nil {
return err
}
err = en.WriteUint8(uint8(z.t))
if err != nil {
return
}
// write "r"
err = en.Append(0xa1, 0x72)
if err != nil {
return err
}
err = z.r.EncodeMsg(en)
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *containerSerz) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "t"
o = append(o, 0x82, 0xa1, 0x74)
o = msgp.AppendUint8(o, uint8(z.t))
// string "r"
o = append(o, 0xa1, 0x72)
o, err = z.r.MarshalMsg(o)
if err != nil {
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *containerSerz) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zbai uint32
zbai, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for zbai > 0 {
zbai--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "t":
{
var zcmr uint8
zcmr, bts, err = msgp.ReadUint8Bytes(bts)
z.t = contype(zcmr)
}
if err != nil {
return
}
case "r":
bts, err = z.r.UnmarshalMsg(bts)
if err != nil {
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *containerSerz) Msgsize() (s int) {
s = 1 + 2 + msgp.Uint8Size + 2 + z.r.Msgsize()
return
}
// DecodeMsg implements msgp.Decodable
func (z *contype) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zajw uint8
zajw, err = dc.ReadUint8()
(*z) = contype(zajw)
}
if err != nil {
return
}
return
}
// EncodeMsg implements msgp.Encodable
func (z contype) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteUint8(uint8(z))
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z contype) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint8(o, uint8(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *contype) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zwht uint8
zwht, bts, err = msgp.ReadUint8Bytes(bts)
(*z) = contype(zwht)
}
if err != nil {
return
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z contype) Msgsize() (s int) {
s = msgp.Uint8Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *roaringArray) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zlqf uint32
zlqf, err = dc.ReadMapHeader()
if err != nil {
return
}
for zlqf > 0 {
zlqf--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "keys":
var zdaf uint32
zdaf, err = dc.ReadArrayHeader()
if err != nil {
return
}
if cap(z.keys) >= int(zdaf) {
z.keys = (z.keys)[:zdaf]
} else {
z.keys = make([]uint16, zdaf)
}
for zhct := range z.keys {
z.keys[zhct], err = dc.ReadUint16()
if err != nil {
return
}
}
case "needCopyOnWrite":
var zpks uint32
zpks, err = dc.ReadArrayHeader()
if err != nil {
return
}
if cap(z.needCopyOnWrite) >= int(zpks) {
z.needCopyOnWrite = (z.needCopyOnWrite)[:zpks]
} else {
z.needCopyOnWrite = make([]bool, zpks)
}
for zcua := range z.needCopyOnWrite {
z.needCopyOnWrite[zcua], err = dc.ReadBool()
if err != nil {
return
}
}
case "copyOnWrite":
z.copyOnWrite, err = dc.ReadBool()
if err != nil {
return
}
case "conserz":
var zjfb uint32
zjfb, err = dc.ReadArrayHeader()
if err != nil {
return
}
if cap(z.conserz) >= int(zjfb) {
z.conserz = (z.conserz)[:zjfb]
} else {
z.conserz = make([]containerSerz, zjfb)
}
for zxhx := range z.conserz {
var zcxo uint32
zcxo, err = dc.ReadMapHeader()
if err != nil {
return
}
for zcxo > 0 {
zcxo--
field, err = dc.ReadMapKeyPtr()
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "t":
{
var zeff uint8
zeff, err = dc.ReadUint8()
z.conserz[zxhx].t = contype(zeff)
}
if err != nil {
return
}
case "r":
err = z.conserz[zxhx].r.DecodeMsg(dc)
if err != nil {
return
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
}
default:
err = dc.Skip()
if err != nil {
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *roaringArray) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 4
// write "keys"
err = en.Append(0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73)
if err != nil {
return err
}
err = en.WriteArrayHeader(uint32(len(z.keys)))
if err != nil {
return
}
for zhct := range z.keys {
err = en.WriteUint16(z.keys[zhct])
if err != nil {
return
}
}
// write "needCopyOnWrite"
err = en.Append(0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
if err != nil {
return err
}
err = en.WriteArrayHeader(uint32(len(z.needCopyOnWrite)))
if err != nil {
return
}
for zcua := range z.needCopyOnWrite {
err = en.WriteBool(z.needCopyOnWrite[zcua])
if err != nil {
return
}
}
// write "copyOnWrite"
err = en.Append(0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
if err != nil {
return err
}
err = en.WriteBool(z.copyOnWrite)
if err != nil {
return
}
// write "conserz"
err = en.Append(0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a)
if err != nil {
return err
}
err = en.WriteArrayHeader(uint32(len(z.conserz)))
if err != nil {
return
}
for zxhx := range z.conserz {
// map header, size 2
// write "t"
err = en.Append(0x82, 0xa1, 0x74)
if err != nil {
return err
}
err = en.WriteUint8(uint8(z.conserz[zxhx].t))
if err != nil {
return
}
// write "r"
err = en.Append(0xa1, 0x72)
if err != nil {
return err
}
err = z.conserz[zxhx].r.EncodeMsg(en)
if err != nil {
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *roaringArray) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 4
// string "keys"
o = append(o, 0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.keys)))
for zhct := range z.keys {
o = msgp.AppendUint16(o, z.keys[zhct])
}
// string "needCopyOnWrite"
o = append(o, 0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
o = msgp.AppendArrayHeader(o, uint32(len(z.needCopyOnWrite)))
for zcua := range z.needCopyOnWrite {
o = msgp.AppendBool(o, z.needCopyOnWrite[zcua])
}
// string "copyOnWrite"
o = append(o, 0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
o = msgp.AppendBool(o, z.copyOnWrite)
// string "conserz"
o = append(o, 0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a)
o = msgp.AppendArrayHeader(o, uint32(len(z.conserz)))
for zxhx := range z.conserz {
// map header, size 2
// string "t"
o = append(o, 0x82, 0xa1, 0x74)
o = msgp.AppendUint8(o, uint8(z.conserz[zxhx].t))
// string "r"
o = append(o, 0xa1, 0x72)
o, err = z.conserz[zxhx].r.MarshalMsg(o)
if err != nil {
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *roaringArray) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zrsw uint32
zrsw, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for zrsw > 0 {
zrsw--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "keys":
var zxpk uint32
zxpk, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return
}
if cap(z.keys) >= int(zxpk) {
z.keys = (z.keys)[:zxpk]
} else {
z.keys = make([]uint16, zxpk)
}
for zhct := range z.keys {
z.keys[zhct], bts, err = msgp.ReadUint16Bytes(bts)
if err != nil {
return
}
}
case "needCopyOnWrite":
var zdnj uint32
zdnj, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return
}
if cap(z.needCopyOnWrite) >= int(zdnj) {
z.needCopyOnWrite = (z.needCopyOnWrite)[:zdnj]
} else {
z.needCopyOnWrite = make([]bool, zdnj)
}
for zcua := range z.needCopyOnWrite {
z.needCopyOnWrite[zcua], bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
return
}
}
case "copyOnWrite":
z.copyOnWrite, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
return
}
case "conserz":
var zobc uint32
zobc, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return
}
if cap(z.conserz) >= int(zobc) {
z.conserz = (z.conserz)[:zobc]
} else {
z.conserz = make([]containerSerz, zobc)
}
for zxhx := range z.conserz {
var zsnv uint32
zsnv, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return
}
for zsnv > 0 {
zsnv--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return
}
switch msgp.UnsafeString(field) {
case "t":
{
var zkgt uint8
zkgt, bts, err = msgp.ReadUint8Bytes(bts)
z.conserz[zxhx].t = contype(zkgt)
}
if err != nil {
return
}
case "r":
bts, err = z.conserz[zxhx].r.UnmarshalMsg(bts)
if err != nil {
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *roaringArray) Msgsize() (s int) {
s = 1 + 5 + msgp.ArrayHeaderSize + (len(z.keys) * (msgp.Uint16Size)) + 16 + msgp.ArrayHeaderSize + (len(z.needCopyOnWrite) * (msgp.BoolSize)) + 12 + msgp.BoolSize + 8 + msgp.ArrayHeaderSize
for zxhx := range z.conserz {
s += 1 + 2 + msgp.Uint8Size + 2 + z.conserz[zxhx].r.Msgsize()
}
return
}
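// Round-trip sketch (editor's note, illustrative only, not part of the
// generated file): the methods above satisfy msgp.Encodable/Decodable, so a
// roaringArray can be serialized through the msgp helpers, e.g.
//
//	var buf bytes.Buffer
//	if err := msgp.Encode(&buf, ra); err != nil { /* handle err */ } // ra is a *roaringArray
//	if err := msgp.Decode(&buf, ra); err != nil { /* handle err */ }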

View File

@ -0,0 +1,83 @@
package roaring
import (
"encoding/binary"
"errors"
"fmt"
"io"
"github.com/tinylib/msgp/msgp"
)
// writeTo for runContainer16 follows this
// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
//
func (b *runContainer16) writeTo(stream io.Writer) (int, error) {
buf := make([]byte, 2+4*len(b.iv))
binary.LittleEndian.PutUint16(buf[0:], uint16(len(b.iv)))
for i, v := range b.iv {
binary.LittleEndian.PutUint16(buf[2+i*4:], v.start)
binary.LittleEndian.PutUint16(buf[2+2+i*4:], v.length)
}
return stream.Write(buf)
}
func (b *runContainer32) writeToMsgpack(stream io.Writer) (int, error) {
bts, err := b.MarshalMsg(nil)
if err != nil {
return 0, err
}
return stream.Write(bts)
}
func (b *runContainer16) writeToMsgpack(stream io.Writer) (int, error) {
bts, err := b.MarshalMsg(nil)
if err != nil {
return 0, err
}
return stream.Write(bts)
}
func (b *runContainer32) readFromMsgpack(stream io.Reader) (int, error) {
err := msgp.Decode(stream, b)
return 0, err
}
func (b *runContainer16) readFromMsgpack(stream io.Reader) (int, error) {
err := msgp.Decode(stream, b)
return 0, err
}
var errCorruptedStream = errors.New("insufficient/odd number of stored bytes, corrupted stream detected")
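// readFrom reads a runContainer16 from stream in the format produced by
// writeTo above: a little-endian uint16 run count followed by one
// (start, length) pair of little-endian uint16 values per run.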
func (b *runContainer16) readFrom(stream io.Reader) (int, error) {
b.iv = b.iv[:0]
b.card = 0
var numRuns uint16
err := binary.Read(stream, binary.LittleEndian, &numRuns)
if err != nil {
return 0, err
}
nr := int(numRuns)
encRun := make([]uint16, 2*nr)
by := make([]byte, 4*nr)
err = binary.Read(stream, binary.LittleEndian, &by)
if err != nil {
return 0, err
}
for i := range encRun {
if len(by) < 2 {
return 0, errCorruptedStream
}
encRun[i] = binary.LittleEndian.Uint16(by)
by = by[2:]
}
for i := 0; i < nr; i++ {
if i > 0 && b.iv[i-1].last() >= encRun[i*2] {
return 0, fmt.Errorf("error: stored runContainer had runs that were not in sorted order!! (b.iv[i-1=%v].last = %v >= encRun[i=%v] = %v)", i-1, b.iv[i-1].last(), i, encRun[i*2])
}
b.iv = append(b.iv, interval16{start: encRun[i*2], length: encRun[i*2+1]})
b.card += int64(encRun[i*2+1]) + 1
}
return 0, err
}

View File

@ -0,0 +1,118 @@
// +build !amd64,!386 appengine
package roaring
import (
"encoding/binary"
"io"
)
func (b *arrayContainer) writeTo(stream io.Writer) (int, error) {
buf := make([]byte, 2*len(b.content))
for i, v := range b.content {
base := i * 2
buf[base] = byte(v)
buf[base+1] = byte(v >> 8)
}
return stream.Write(buf)
}
func (b *arrayContainer) readFrom(stream io.Reader) (int, error) {
err := binary.Read(stream, binary.LittleEndian, b.content)
if err != nil {
return 0, err
}
return 2 * len(b.content), nil
}
func (b *bitmapContainer) writeTo(stream io.Writer) (int, error) {
// Write set
buf := make([]byte, 8*len(b.bitmap))
for i, v := range b.bitmap {
base := i * 8
buf[base] = byte(v)
buf[base+1] = byte(v >> 8)
buf[base+2] = byte(v >> 16)
buf[base+3] = byte(v >> 24)
buf[base+4] = byte(v >> 32)
buf[base+5] = byte(v >> 40)
buf[base+6] = byte(v >> 48)
buf[base+7] = byte(v >> 56)
}
return stream.Write(buf)
}
func (b *bitmapContainer) readFrom(stream io.Reader) (int, error) {
err := binary.Read(stream, binary.LittleEndian, b.bitmap)
if err != nil {
return 0, err
}
b.computeCardinality()
return 8 * len(b.bitmap), nil
}
func (bc *bitmapContainer) asLittleEndianByteSlice() []byte {
by := make([]byte, len(bc.bitmap)*8)
for i := range bc.bitmap {
binary.LittleEndian.PutUint64(by[i*8:], bc.bitmap[i])
}
return by
}
func uint64SliceAsByteSlice(slice []uint64) []byte {
by := make([]byte, len(slice)*8)
for i, v := range slice {
binary.LittleEndian.PutUint64(by[i*8:], v)
}
return by
}
func byteSliceAsUint16Slice(slice []byte) []uint16 {
if len(slice)%2 != 0 {
panic("Slice size should be divisible by 2")
}
b := make([]uint16, len(slice)/2)
for i := range b {
b[i] = binary.LittleEndian.Uint16(slice[2*i:])
}
return b
}
func byteSliceAsUint64Slice(slice []byte) []uint64 {
if len(slice)%8 != 0 {
panic("Slice size should be divisible by 8")
}
b := make([]uint64, len(slice)/8)
for i := range b {
b[i] = binary.LittleEndian.Uint64(slice[8*i:])
}
return b
}
// byteSliceAsInterval16Slice converts a byte slice to an interval16 slice.
// The function assumes that the byte buffer holds run-container data encoded
// according to the Roaring Format Spec.
func byteSliceAsInterval16Slice(byteSlice []byte) []interval16 {
if len(byteSlice)%4 != 0 {
panic("Slice size should be divisible by 4")
}
intervalSlice := make([]interval16, len(byteSlice)/4)
for i := range intervalSlice {
intervalSlice[i] = interval16{
start: binary.LittleEndian.Uint16(byteSlice[i*4:]),
length: binary.LittleEndian.Uint16(byteSlice[i*4+2:]),
}
}
return intervalSlice
}

View File

@ -0,0 +1,113 @@
// +build 386 amd64,!appengine
package roaring
import (
"io"
"reflect"
"unsafe"
)
func (ac *arrayContainer) writeTo(stream io.Writer) (int, error) {
buf := uint16SliceAsByteSlice(ac.content)
return stream.Write(buf)
}
func (bc *bitmapContainer) writeTo(stream io.Writer) (int, error) {
buf := uint64SliceAsByteSlice(bc.bitmap)
return stream.Write(buf)
}
// readFrom reads an arrayContainer from stream.
// PRE-REQUISITE: you must size the arrayContainer correctly (allocate
// ac.content) *before* you call readFrom; the size cannot be inferred from
// the stream at this point.
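// Illustrative call pattern (editor's sketch, not part of the library):
//
//	ac := &arrayContainer{content: make([]uint16, cardinality)}
//	if _, err := ac.readFrom(r); err != nil {
//		// handle err
//	}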
func (ac *arrayContainer) readFrom(stream io.Reader) (int, error) {
buf := uint16SliceAsByteSlice(ac.content)
return io.ReadFull(stream, buf)
}
func (bc *bitmapContainer) readFrom(stream io.Reader) (int, error) {
buf := uint64SliceAsByteSlice(bc.bitmap)
n, err := io.ReadFull(stream, buf)
bc.computeCardinality()
return n, err
}
func uint64SliceAsByteSlice(slice []uint64) []byte {
// make a new slice header
header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
// update its capacity and length
header.Len *= 8
header.Cap *= 8
// return it
return *(*[]byte)(unsafe.Pointer(&header))
}
func uint16SliceAsByteSlice(slice []uint16) []byte {
// make a new slice header
header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
// update its capacity and length
header.Len *= 2
header.Cap *= 2
// return it
return *(*[]byte)(unsafe.Pointer(&header))
}
func (bc *bitmapContainer) asLittleEndianByteSlice() []byte {
return uint64SliceAsByteSlice(bc.bitmap)
}
// Deserialization code follows
func byteSliceAsUint16Slice(slice []byte) []uint16 {
if len(slice)%2 != 0 {
panic("Slice size should be divisible by 2")
}
// make a new slice header
header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
// update its capacity and length
header.Len /= 2
header.Cap /= 2
// return it
return *(*[]uint16)(unsafe.Pointer(&header))
}
func byteSliceAsUint64Slice(slice []byte) []uint64 {
if len(slice)%8 != 0 {
panic("Slice size should be divisible by 8")
}
// make a new slice header
header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
// update its capacity and length
header.Len /= 8
header.Cap /= 8
// return it
return *(*[]uint64)(unsafe.Pointer(&header))
}
func byteSliceAsInterval16Slice(slice []byte) []interval16 {
if len(slice)%4 != 0 {
panic("Slice size should be divisible by 4")
}
// make a new slice header
header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
// update its capacity and length
header.Len /= 4
header.Cap /= 4
// return it
return *(*[]interval16)(unsafe.Pointer(&header))
}

View File

@ -0,0 +1,21 @@
// +build gofuzz
package roaring
import "bytes"
func FuzzSerializationStream(data []byte) int {
newrb := NewBitmap()
if _, err := newrb.ReadFrom(bytes.NewReader(data)); err != nil {
return 0
}
return 1
}
func FuzzSerializationBuffer(data []byte) int {
newrb := NewBitmap()
if _, err := newrb.FromBuffer(data); err != nil {
return 0
}
return 1
}

609
vendor/github.com/RoaringBitmap/roaring/setutil.go generated vendored Normal file
View File

@ -0,0 +1,609 @@
package roaring
func equal(a, b []uint16) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}
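// difference writes set1 \ set2 (the elements of set1 not present in set2)
// into buffer and returns the number of elements written. Both inputs must be
// sorted and buffer must have sufficient capacity.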
func difference(set1 []uint16, set2 []uint16, buffer []uint16) int {
if 0 == len(set2) {
for k := 0; k < len(set1); k++ {
buffer[k] = set1[k]
}
return len(set1)
}
if 0 == len(set1) {
return 0
}
pos := 0
k1 := 0
k2 := 0
buffer = buffer[:cap(buffer)]
s1 := set1[k1]
s2 := set2[k2]
for {
if s1 < s2 {
buffer[pos] = s1
pos++
k1++
if k1 >= len(set1) {
break
}
s1 = set1[k1]
} else if s1 == s2 {
k1++
k2++
if k1 >= len(set1) {
break
}
s1 = set1[k1]
if k2 >= len(set2) {
for ; k1 < len(set1); k1++ {
buffer[pos] = set1[k1]
pos++
}
break
}
s2 = set2[k2]
} else { // if (val1>val2)
k2++
if k2 >= len(set2) {
for ; k1 < len(set1); k1++ {
buffer[pos] = set1[k1]
pos++
}
break
}
s2 = set2[k2]
}
}
return pos
}
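// exclusiveUnion2by2 writes the symmetric difference (XOR) of two sorted
// uint16 sets into buffer and returns the number of elements written.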
func exclusiveUnion2by2(set1 []uint16, set2 []uint16, buffer []uint16) int {
if 0 == len(set2) {
buffer = buffer[:len(set1)]
copy(buffer, set1[:])
return len(set1)
}
if 0 == len(set1) {
buffer = buffer[:len(set2)]
copy(buffer, set2[:])
return len(set2)
}
pos := 0
k1 := 0
k2 := 0
s1 := set1[k1]
s2 := set2[k2]
buffer = buffer[:cap(buffer)]
for {
if s1 < s2 {
buffer[pos] = s1
pos++
k1++
if k1 >= len(set1) {
for ; k2 < len(set2); k2++ {
buffer[pos] = set2[k2]
pos++
}
break
}
s1 = set1[k1]
} else if s1 == s2 {
k1++
k2++
if k1 >= len(set1) {
for ; k2 < len(set2); k2++ {
buffer[pos] = set2[k2]
pos++
}
break
}
if k2 >= len(set2) {
for ; k1 < len(set1); k1++ {
buffer[pos] = set1[k1]
pos++
}
break
}
s1 = set1[k1]
s2 = set2[k2]
} else { // if (val1>val2)
buffer[pos] = s2
pos++
k2++
if k2 >= len(set2) {
for ; k1 < len(set1); k1++ {
buffer[pos] = set1[k1]
pos++
}
break
}
s2 = set2[k2]
}
}
return pos
}
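// union2by2 merges two sorted uint16 sets into buffer, dropping duplicates,
// and returns the number of elements written.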
func union2by2(set1 []uint16, set2 []uint16, buffer []uint16) int {
pos := 0
k1 := 0
k2 := 0
if 0 == len(set2) {
buffer = buffer[:len(set1)]
copy(buffer, set1[:])
return len(set1)
}
if 0 == len(set1) {
buffer = buffer[:len(set2)]
copy(buffer, set2[:])
return len(set2)
}
s1 := set1[k1]
s2 := set2[k2]
buffer = buffer[:cap(buffer)]
for {
if s1 < s2 {
buffer[pos] = s1
pos++
k1++
if k1 >= len(set1) {
copy(buffer[pos:], set2[k2:])
pos += len(set2) - k2
break
}
s1 = set1[k1]
} else if s1 == s2 {
buffer[pos] = s1
pos++
k1++
k2++
if k1 >= len(set1) {
copy(buffer[pos:], set2[k2:])
pos += len(set2) - k2
break
}
if k2 >= len(set2) {
copy(buffer[pos:], set1[k1:])
pos += len(set1) - k1
break
}
s1 = set1[k1]
s2 = set2[k2]
} else { // if (set1[k1]>set2[k2])
buffer[pos] = s2
pos++
k2++
if k2 >= len(set2) {
copy(buffer[pos:], set1[k1:])
pos += len(set1) - k1
break
}
s2 = set2[k2]
}
}
return pos
}
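// union2by2Cardinality returns the size of the union of two sorted uint16
// sets without materializing the result.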
func union2by2Cardinality(set1 []uint16, set2 []uint16) int {
pos := 0
k1 := 0
k2 := 0
if 0 == len(set2) {
return len(set1)
}
if 0 == len(set1) {
return len(set2)
}
s1 := set1[k1]
s2 := set2[k2]
for {
if s1 < s2 {
pos++
k1++
if k1 >= len(set1) {
pos += len(set2) - k2
break
}
s1 = set1[k1]
} else if s1 == s2 {
pos++
k1++
k2++
if k1 >= len(set1) {
pos += len(set2) - k2
break
}
if k2 >= len(set2) {
pos += len(set1) - k1
break
}
s1 = set1[k1]
s2 = set2[k2]
} else { // if (set1[k1]>set2[k2])
pos++
k2++
if k2 >= len(set2) {
pos += len(set1) - k1
break
}
s2 = set2[k2]
}
}
return pos
}
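// intersection2by2 intersects two sorted uint16 sets into buffer and returns
// the number of elements written. When one set is more than 64 times the size
// of the other, a one-sided galloping intersection is used; otherwise the
// linear merge localintersect2by2 is used.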
func intersection2by2(
set1 []uint16,
set2 []uint16,
buffer []uint16) int {
if len(set1)*64 < len(set2) {
return onesidedgallopingintersect2by2(set1, set2, buffer)
} else if len(set2)*64 < len(set1) {
return onesidedgallopingintersect2by2(set2, set1, buffer)
} else {
return localintersect2by2(set1, set2, buffer)
}
}
func intersection2by2Cardinality(
set1 []uint16,
set2 []uint16) int {
if len(set1)*64 < len(set2) {
return onesidedgallopingintersect2by2Cardinality(set1, set2)
} else if len(set2)*64 < len(set1) {
return onesidedgallopingintersect2by2Cardinality(set2, set1)
} else {
return localintersect2by2Cardinality(set1, set2)
}
}
func intersects2by2(
set1 []uint16,
set2 []uint16) bool {
// could be optimized if one set is much larger than the other one
if (0 == len(set1)) || (0 == len(set2)) {
return false
}
k1 := 0
k2 := 0
s1 := set1[k1]
s2 := set2[k2]
mainwhile:
for {
if s2 < s1 {
for {
k2++
if k2 == len(set2) {
break mainwhile
}
s2 = set2[k2]
if s2 >= s1 {
break
}
}
}
if s1 < s2 {
for {
k1++
if k1 == len(set1) {
break mainwhile
}
s1 = set1[k1]
if s1 >= s2 {
break
}
}
} else {
// (set2[k2] == set1[k1])
return true
}
}
return false
}
func localintersect2by2(
set1 []uint16,
set2 []uint16,
buffer []uint16) int {
if (0 == len(set1)) || (0 == len(set2)) {
return 0
}
k1 := 0
k2 := 0
pos := 0
buffer = buffer[:cap(buffer)]
s1 := set1[k1]
s2 := set2[k2]
mainwhile:
for {
if s2 < s1 {
for {
k2++
if k2 == len(set2) {
break mainwhile
}
s2 = set2[k2]
if s2 >= s1 {
break
}
}
}
if s1 < s2 {
for {
k1++
if k1 == len(set1) {
break mainwhile
}
s1 = set1[k1]
if s1 >= s2 {
break
}
}
} else {
// (set2[k2] == set1[k1])
buffer[pos] = s1
pos++
k1++
if k1 == len(set1) {
break
}
s1 = set1[k1]
k2++
if k2 == len(set2) {
break
}
s2 = set2[k2]
}
}
return pos
}
func localintersect2by2Cardinality(
set1 []uint16,
set2 []uint16) int {
if (0 == len(set1)) || (0 == len(set2)) {
return 0
}
k1 := 0
k2 := 0
pos := 0
s1 := set1[k1]
s2 := set2[k2]
mainwhile:
for {
if s2 < s1 {
for {
k2++
if k2 == len(set2) {
break mainwhile
}
s2 = set2[k2]
if s2 >= s1 {
break
}
}
}
if s1 < s2 {
for {
k1++
if k1 == len(set1) {
break mainwhile
}
s1 = set1[k1]
if s1 >= s2 {
break
}
}
} else {
// (set2[k2] == set1[k1])
pos++
k1++
if k1 == len(set1) {
break
}
s1 = set1[k1]
k2++
if k2 == len(set2) {
break
}
s2 = set2[k2]
}
}
return pos
}
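// advanceUntil returns the smallest index greater than pos such that
// array[index] >= min, using an exponential (galloping) probe followed by a
// binary search; it returns length when no such index exists.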
func advanceUntil(
array []uint16,
pos int,
length int,
min uint16) int {
lower := pos + 1
if lower >= length || array[lower] >= min {
return lower
}
spansize := 1
for lower+spansize < length && array[lower+spansize] < min {
spansize *= 2
}
var upper int
if lower+spansize < length {
upper = lower + spansize
} else {
upper = length - 1
}
if array[upper] == min {
return upper
}
if array[upper] < min {
// means the array has no item >= min, so pos = array.length
return length
}
// we know that the next-smallest span was too small
lower += (spansize >> 1)
mid := 0
for lower+1 != upper {
mid = (lower + upper) >> 1
if array[mid] == min {
return mid
} else if array[mid] < min {
lower = mid
} else {
upper = mid
}
}
return upper
}
func onesidedgallopingintersect2by2(
smallset []uint16,
largeset []uint16,
buffer []uint16) int {
if 0 == len(smallset) {
return 0
}
buffer = buffer[:cap(buffer)]
k1 := 0
k2 := 0
pos := 0
s1 := largeset[k1]
s2 := smallset[k2]
mainwhile:
for {
if s1 < s2 {
k1 = advanceUntil(largeset, k1, len(largeset), s2)
if k1 == len(largeset) {
break mainwhile
}
s1 = largeset[k1]
}
if s2 < s1 {
k2++
if k2 == len(smallset) {
break mainwhile
}
s2 = smallset[k2]
} else {
buffer[pos] = s2
pos++
k2++
if k2 == len(smallset) {
break
}
s2 = smallset[k2]
k1 = advanceUntil(largeset, k1, len(largeset), s2)
if k1 == len(largeset) {
break mainwhile
}
s1 = largeset[k1]
}
}
return pos
}
func onesidedgallopingintersect2by2Cardinality(
smallset []uint16,
largeset []uint16) int {
if 0 == len(smallset) {
return 0
}
k1 := 0
k2 := 0
pos := 0
s1 := largeset[k1]
s2 := smallset[k2]
mainwhile:
for {
if s1 < s2 {
k1 = advanceUntil(largeset, k1, len(largeset), s2)
if k1 == len(largeset) {
break mainwhile
}
s1 = largeset[k1]
}
if s2 < s1 {
k2++
if k2 == len(smallset) {
break mainwhile
}
s2 = smallset[k2]
} else {
pos++
k2++
if k2 == len(smallset) {
break
}
s2 = smallset[k2]
k1 = advanceUntil(largeset, k1, len(largeset), s2)
if k1 == len(largeset) {
break mainwhile
}
s1 = largeset[k1]
}
}
return pos
}
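// binarySearch returns the index of ikey in the sorted array, or
// -(insertionPoint + 1) if ikey is absent, where insertionPoint is the index
// at which ikey would be inserted (the Java-style convention).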
func binarySearch(array []uint16, ikey uint16) int {
low := 0
high := len(array) - 1
for low+16 <= high {
middleIndex := int(uint32(low+high) >> 1)
middleValue := array[middleIndex]
if middleValue < ikey {
low = middleIndex + 1
} else if middleValue > ikey {
high = middleIndex - 1
} else {
return middleIndex
}
}
for ; low <= high; low++ {
val := array[low]
if val >= ikey {
if val == ikey {
return low
}
break
}
}
return -(low + 1)
}

View File

@ -0,0 +1,21 @@
package roaring
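// shortIterable is a minimal forward-iterator contract over uint16 values;
// shortIterator implements it over a slice, advancing loc on each call to next.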
type shortIterable interface {
hasNext() bool
next() uint16
}
type shortIterator struct {
slice []uint16
loc int
}
func (si *shortIterator) hasNext() bool {
return si.loc < len(si.slice)
}
func (si *shortIterator) next() uint16 {
a := si.slice[si.loc]
si.loc++
return a
}

383
vendor/github.com/RoaringBitmap/roaring/smat.go generated vendored Normal file
View File

@ -0,0 +1,383 @@
// +build gofuzz
/*
# Instructions for smat testing for roaring
[smat](https://github.com/mschoch/smat) is a framework that provides
state machine assisted fuzz testing.
To run the smat tests for roaring...
## Prerequisites
$ go get github.com/dvyukov/go-fuzz/go-fuzz
$ go get github.com/dvyukov/go-fuzz/go-fuzz-build
## Steps
1. Generate initial smat corpus:
```
go test -tags=gofuzz -run=TestGenerateSmatCorpus
```
2. Build go-fuzz test program with instrumentation:
```
go-fuzz-build -func FuzzSmat github.com/RoaringBitmap/roaring
```
3. Run go-fuzz:
```
go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
```
You should see output like...
```
2016/09/16 13:58:35 slaves: 8, corpus: 1 (3s ago), crashers: 0, restarts: 1/0, execs: 0 (0/sec), cover: 0, uptime: 3s
2016/09/16 13:58:38 slaves: 8, corpus: 1 (6s ago), crashers: 0, restarts: 1/0, execs: 0 (0/sec), cover: 0, uptime: 6s
2016/09/16 13:58:41 slaves: 8, corpus: 1 (9s ago), crashers: 0, restarts: 1/44, execs: 44 (5/sec), cover: 0, uptime: 9s
2016/09/16 13:58:44 slaves: 8, corpus: 1 (12s ago), crashers: 0, restarts: 1/45, execs: 45 (4/sec), cover: 0, uptime: 12s
2016/09/16 13:58:47 slaves: 8, corpus: 1 (15s ago), crashers: 0, restarts: 1/46, execs: 46 (3/sec), cover: 0, uptime: 15s
2016/09/16 13:58:50 slaves: 8, corpus: 1 (18s ago), crashers: 0, restarts: 1/47, execs: 47 (3/sec), cover: 0, uptime: 18s
2016/09/16 13:58:53 slaves: 8, corpus: 1 (21s ago), crashers: 0, restarts: 1/63, execs: 63 (3/sec), cover: 0, uptime: 21s
2016/09/16 13:58:56 slaves: 8, corpus: 1 (24s ago), crashers: 0, restarts: 1/65, execs: 65 (3/sec), cover: 0, uptime: 24s
2016/09/16 13:58:59 slaves: 8, corpus: 1 (27s ago), crashers: 0, restarts: 1/66, execs: 66 (2/sec), cover: 0, uptime: 27s
2016/09/16 13:59:02 slaves: 8, corpus: 1 (30s ago), crashers: 0, restarts: 1/67, execs: 67 (2/sec), cover: 0, uptime: 30s
2016/09/16 13:59:05 slaves: 8, corpus: 1 (33s ago), crashers: 0, restarts: 1/83, execs: 83 (3/sec), cover: 0, uptime: 33s
2016/09/16 13:59:08 slaves: 8, corpus: 1 (36s ago), crashers: 0, restarts: 1/84, execs: 84 (2/sec), cover: 0, uptime: 36s
2016/09/16 13:59:11 slaves: 8, corpus: 2 (0s ago), crashers: 0, restarts: 1/85, execs: 85 (2/sec), cover: 0, uptime: 39s
2016/09/16 13:59:14 slaves: 8, corpus: 17 (2s ago), crashers: 0, restarts: 1/86, execs: 86 (2/sec), cover: 480, uptime: 42s
2016/09/16 13:59:17 slaves: 8, corpus: 17 (5s ago), crashers: 0, restarts: 1/66, execs: 132 (3/sec), cover: 487, uptime: 45s
2016/09/16 13:59:20 slaves: 8, corpus: 17 (8s ago), crashers: 0, restarts: 1/440, execs: 2645 (55/sec), cover: 487, uptime: 48s
```
Let it run, and if the # of crashers is > 0, check out the reports in
the workdir where you should be able to find the panic goroutine stack
traces.
*/
package roaring
import (
"fmt"
"sort"
"github.com/mschoch/smat"
"github.com/willf/bitset"
)
// fuzz test using state machine driven by byte stream.
func FuzzSmat(data []byte) int {
return smat.Fuzz(&smatContext{}, smat.ActionID('S'), smat.ActionID('T'),
smatActionMap, data)
}
var smatDebug = false
func smatLog(prefix, format string, args ...interface{}) {
if smatDebug {
fmt.Print(prefix)
fmt.Printf(format, args...)
}
}
type smatContext struct {
pairs []*smatPair
// Two registers, x & y.
x int
y int
actions int
}
type smatPair struct {
bm *Bitmap
bs *bitset.BitSet
}
// ------------------------------------------------------------------
var smatActionMap = smat.ActionMap{
smat.ActionID('X'): smatAction("x++", smatWrap(func(c *smatContext) { c.x++ })),
smat.ActionID('x'): smatAction("x--", smatWrap(func(c *smatContext) { c.x-- })),
smat.ActionID('Y'): smatAction("y++", smatWrap(func(c *smatContext) { c.y++ })),
smat.ActionID('y'): smatAction("y--", smatWrap(func(c *smatContext) { c.y-- })),
smat.ActionID('*'): smatAction("x*y", smatWrap(func(c *smatContext) { c.x = c.x * c.y })),
smat.ActionID('<'): smatAction("x<<", smatWrap(func(c *smatContext) { c.x = c.x << 1 })),
smat.ActionID('^'): smatAction("swap", smatWrap(func(c *smatContext) { c.x, c.y = c.y, c.x })),
smat.ActionID('['): smatAction(" pushPair", smatWrap(smatPushPair)),
smat.ActionID(']'): smatAction(" popPair", smatWrap(smatPopPair)),
smat.ActionID('B'): smatAction(" setBit", smatWrap(smatSetBit)),
smat.ActionID('b'): smatAction(" removeBit", smatWrap(smatRemoveBit)),
smat.ActionID('o'): smatAction(" or", smatWrap(smatOr)),
smat.ActionID('a'): smatAction(" and", smatWrap(smatAnd)),
smat.ActionID('#'): smatAction(" cardinality", smatWrap(smatCardinality)),
smat.ActionID('O'): smatAction(" orCardinality", smatWrap(smatOrCardinality)),
smat.ActionID('A'): smatAction(" andCardinality", smatWrap(smatAndCardinality)),
smat.ActionID('c'): smatAction(" clear", smatWrap(smatClear)),
smat.ActionID('r'): smatAction(" runOptimize", smatWrap(smatRunOptimize)),
smat.ActionID('e'): smatAction(" isEmpty", smatWrap(smatIsEmpty)),
smat.ActionID('i'): smatAction(" intersects", smatWrap(smatIntersects)),
smat.ActionID('f'): smatAction(" flip", smatWrap(smatFlip)),
smat.ActionID('-'): smatAction(" difference", smatWrap(smatDifference)),
}
var smatRunningPercentActions []smat.PercentAction
func init() {
var ids []int
for actionId := range smatActionMap {
ids = append(ids, int(actionId))
}
sort.Ints(ids)
pct := 100 / len(smatActionMap)
for _, actionId := range ids {
smatRunningPercentActions = append(smatRunningPercentActions,
smat.PercentAction{pct, smat.ActionID(actionId)})
}
smatActionMap[smat.ActionID('S')] = smatAction("SETUP", smatSetupFunc)
smatActionMap[smat.ActionID('T')] = smatAction("TEARDOWN", smatTeardownFunc)
}
// We only have one smat state: running.
func smatRunning(next byte) smat.ActionID {
return smat.PercentExecute(next, smatRunningPercentActions...)
}
func smatAction(name string, f func(ctx smat.Context) (smat.State, error)) func(smat.Context) (smat.State, error) {
return func(ctx smat.Context) (smat.State, error) {
c := ctx.(*smatContext)
c.actions++
smatLog(" ", "%s\n", name)
return f(ctx)
}
}
// Creates an smat action func based on a simple callback.
func smatWrap(cb func(c *smatContext)) func(smat.Context) (next smat.State, err error) {
return func(ctx smat.Context) (next smat.State, err error) {
c := ctx.(*smatContext)
cb(c)
return smatRunning, nil
}
}
// Invokes a callback function with the input v bounded to len(c.pairs).
func (c *smatContext) withPair(v int, cb func(*smatPair)) {
if len(c.pairs) > 0 {
if v < 0 {
v = -v
}
v = v % len(c.pairs)
cb(c.pairs[v])
}
}
// ------------------------------------------------------------------
func smatSetupFunc(ctx smat.Context) (next smat.State, err error) {
return smatRunning, nil
}
func smatTeardownFunc(ctx smat.Context) (next smat.State, err error) {
return nil, err
}
// ------------------------------------------------------------------
func smatPushPair(c *smatContext) {
c.pairs = append(c.pairs, &smatPair{
bm: NewBitmap(),
bs: bitset.New(100),
})
}
func smatPopPair(c *smatContext) {
if len(c.pairs) > 0 {
c.pairs = c.pairs[0 : len(c.pairs)-1]
}
}
func smatSetBit(c *smatContext) {
c.withPair(c.x, func(p *smatPair) {
y := uint32(c.y)
p.bm.AddInt(int(y))
p.bs.Set(uint(y))
p.checkEquals()
})
}
func smatRemoveBit(c *smatContext) {
c.withPair(c.x, func(p *smatPair) {
y := uint32(c.y)
p.bm.Remove(y)
p.bs.Clear(uint(y))
p.checkEquals()
})
}
func smatAnd(c *smatContext) {
c.withPair(c.x, func(px *smatPair) {
c.withPair(c.y, func(py *smatPair) {
px.bm.And(py.bm)
px.bs = px.bs.Intersection(py.bs)
px.checkEquals()
py.checkEquals()
})
})
}
func smatOr(c *smatContext) {
c.withPair(c.x, func(px *smatPair) {
c.withPair(c.y, func(py *smatPair) {
px.bm.Or(py.bm)
px.bs = px.bs.Union(py.bs)
px.checkEquals()
py.checkEquals()
})
})
}
func smatAndCardinality(c *smatContext) {
c.withPair(c.x, func(px *smatPair) {
c.withPair(c.y, func(py *smatPair) {
c0 := px.bm.AndCardinality(py.bm)
c1 := px.bs.IntersectionCardinality(py.bs)
if c0 != uint64(c1) {
panic("expected same add cardinality")
}
px.checkEquals()
py.checkEquals()
})
})
}
func smatOrCardinality(c *smatContext) {
c.withPair(c.x, func(px *smatPair) {
c.withPair(c.y, func(py *smatPair) {
c0 := px.bm.OrCardinality(py.bm)
c1 := px.bs.UnionCardinality(py.bs)
if c0 != uint64(c1) {
panic("expected same or cardinality")
}
px.checkEquals()
py.checkEquals()
})
})
}
func smatRunOptimize(c *smatContext) {
c.withPair(c.x, func(px *smatPair) {
px.bm.RunOptimize()
px.checkEquals()
})
}
func smatClear(c *smatContext) {
c.withPair(c.x, func(px *smatPair) {
px.bm.Clear()
px.bs = px.bs.ClearAll()
px.checkEquals()
})
}
func smatCardinality(c *smatContext) {
c.withPair(c.x, func(px *smatPair) {
c0 := px.bm.GetCardinality()
c1 := px.bs.Count()
if c0 != uint64(c1) {
panic("expected same cardinality")
}
})
}
func smatIsEmpty(c *smatContext) {
c.withPair(c.x, func(px *smatPair) {
c0 := px.bm.IsEmpty()
c1 := px.bs.None()
if c0 != c1 {
panic("expected same is empty")
}
})
}
func smatIntersects(c *smatContext) {
c.withPair(c.x, func(px *smatPair) {
c.withPair(c.y, func(py *smatPair) {
v0 := px.bm.Intersects(py.bm)
v1 := px.bs.IntersectionCardinality(py.bs) > 0
if v0 != v1 {
panic("intersects not equal")
}
px.checkEquals()
py.checkEquals()
})
})
}
func smatFlip(c *smatContext) {
c.withPair(c.x, func(p *smatPair) {
y := uint32(c.y)
p.bm.Flip(uint64(y), uint64(y)+1)
p.bs = p.bs.Flip(uint(y))
p.checkEquals()
})
}
func smatDifference(c *smatContext) {
c.withPair(c.x, func(px *smatPair) {
c.withPair(c.y, func(py *smatPair) {
px.bm.AndNot(py.bm)
px.bs = px.bs.Difference(py.bs)
px.checkEquals()
py.checkEquals()
})
})
}
func (p *smatPair) checkEquals() {
if !p.equalsBitSet(p.bs, p.bm) {
panic("bitset mismatch")
}
}
func (p *smatPair) equalsBitSet(a *bitset.BitSet, b *Bitmap) bool {
for i, e := a.NextSet(0); e; i, e = a.NextSet(i + 1) {
if !b.ContainsInt(int(i)) {
fmt.Printf("in a bitset, not b bitmap, i: %d\n", i)
fmt.Printf(" a bitset: %s\n b bitmap: %s\n",
a.String(), b.String())
return false
}
}
i := b.Iterator()
for i.HasNext() {
v := i.Next()
if !a.Test(uint(v)) {
fmt.Printf("in b bitmap, not a bitset, v: %d\n", v)
fmt.Printf(" a bitset: %s\n b bitmap: %s\n",
a.String(), b.String())
return false
}
}
return true
}

315
vendor/github.com/RoaringBitmap/roaring/util.go generated vendored Normal file
View File

@ -0,0 +1,315 @@
package roaring
import (
"math/rand"
"sort"
)
const (
arrayDefaultMaxSize = 4096 // containers with 4096 or fewer integers should be array containers.
arrayLazyLowerBound = 1024
maxCapacity = 1 << 16
serialCookieNoRunContainer = 12346 // only arrays and bitmaps
invalidCardinality = -1
serialCookie = 12347 // runs, arrays, and bitmaps
noOffsetThreshold = 4
// Compute wordSizeInBytes, the size of a word in bytes.
_m = ^uint64(0)
_logS = _m>>8&1 + _m>>16&1 + _m>>32&1
wordSizeInBytes = 1 << _logS
// other constants used in ctz_generic.go
wordSizeInBits = wordSizeInBytes << 3 // word size in bits
)
const maxWord = 1<<wordSizeInBits - 1
// doesn't apply to runContainers
func getSizeInBytesFromCardinality(card int) int {
if card > arrayDefaultMaxSize {
// bitmapContainer
return maxCapacity / 8
}
// arrayContainer
return 2 * card
}
func fill(arr []uint64, val uint64) {
for i := range arr {
arr[i] = val
}
}
func fillRange(arr []uint64, start, end int, val uint64) {
for i := start; i < end; i++ {
arr[i] = val
}
}
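// fillArrayAND writes into container, as uint16 values in increasing order,
// the positions of the bits set in both bitmap1 and bitmap2 (their bitwise
// AND). container must be pre-sized to hold the result.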
func fillArrayAND(container []uint16, bitmap1, bitmap2 []uint64) {
if len(bitmap1) != len(bitmap2) {
panic("array lengths don't match")
}
// TODO: rewrite in assembly
pos := 0
for k := range bitmap1 {
bitset := bitmap1[k] & bitmap2[k]
for bitset != 0 {
t := bitset & -bitset
container[pos] = uint16((k*64 + int(popcount(t-1))))
pos = pos + 1
bitset ^= t
}
}
}
func fillArrayANDNOT(container []uint16, bitmap1, bitmap2 []uint64) {
if len(bitmap1) != len(bitmap2) {
panic("array lengths don't match")
}
// TODO: rewrite in assembly
pos := 0
for k := range bitmap1 {
bitset := bitmap1[k] &^ bitmap2[k]
for bitset != 0 {
t := bitset & -bitset
container[pos] = uint16((k*64 + int(popcount(t-1))))
pos = pos + 1
bitset ^= t
}
}
}
func fillArrayXOR(container []uint16, bitmap1, bitmap2 []uint64) {
if len(bitmap1) != len(bitmap2) {
panic("array lengths don't match")
}
// TODO: rewrite in assembly
pos := 0
for k := 0; k < len(bitmap1); k++ {
bitset := bitmap1[k] ^ bitmap2[k]
for bitset != 0 {
t := bitset & -bitset
container[pos] = uint16((k*64 + int(popcount(t-1))))
pos = pos + 1
bitset ^= t
}
}
}
func highbits(x uint32) uint16 {
return uint16(x >> 16)
}
func lowbits(x uint32) uint16 {
return uint16(x & 0xFFFF)
}
const maxLowBit = 0xFFFF
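// flipBitmapRange inverts the bits in positions [start, end) of bitmap.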
func flipBitmapRange(bitmap []uint64, start int, end int) {
if start >= end {
return
}
firstword := start / 64
endword := (end - 1) / 64
bitmap[firstword] ^= ^(^uint64(0) << uint(start%64))
for i := firstword; i < endword; i++ {
//p("flipBitmapRange on i=%v", i)
bitmap[i] = ^bitmap[i]
}
bitmap[endword] ^= ^uint64(0) >> (uint(-end) % 64)
}
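// resetBitmapRange clears the bits in positions [start, end) of bitmap.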
func resetBitmapRange(bitmap []uint64, start int, end int) {
if start >= end {
return
}
firstword := start / 64
endword := (end - 1) / 64
if firstword == endword {
bitmap[firstword] &= ^((^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64)))
return
}
bitmap[firstword] &= ^(^uint64(0) << uint(start%64))
for i := firstword + 1; i < endword; i++ {
bitmap[i] = 0
}
bitmap[endword] &= ^(^uint64(0) >> (uint(-end) % 64))
}
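// setBitmapRange sets the bits in positions [start, end) of bitmap.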
func setBitmapRange(bitmap []uint64, start int, end int) {
if start >= end {
return
}
firstword := start / 64
endword := (end - 1) / 64
if firstword == endword {
bitmap[firstword] |= (^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64))
return
}
bitmap[firstword] |= ^uint64(0) << uint(start%64)
for i := firstword + 1; i < endword; i++ {
bitmap[i] = ^uint64(0)
}
bitmap[endword] |= ^uint64(0) >> (uint(-end) % 64)
}
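// flipBitmapRangeAndCardinalityChange flips the bits in [start, end) and
// returns the resulting change in cardinality (after minus before, possibly
// negative).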
func flipBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int {
before := wordCardinalityForBitmapRange(bitmap, start, end)
flipBitmapRange(bitmap, start, end)
after := wordCardinalityForBitmapRange(bitmap, start, end)
return int(after - before)
}
func resetBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int {
before := wordCardinalityForBitmapRange(bitmap, start, end)
resetBitmapRange(bitmap, start, end)
after := wordCardinalityForBitmapRange(bitmap, start, end)
return int(after - before)
}
func setBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int {
before := wordCardinalityForBitmapRange(bitmap, start, end)
setBitmapRange(bitmap, start, end)
after := wordCardinalityForBitmapRange(bitmap, start, end)
return int(after - before)
}
func wordCardinalityForBitmapRange(bitmap []uint64, start int, end int) uint64 {
answer := uint64(0)
if start >= end {
return answer
}
firstword := start / 64
endword := (end - 1) / 64
for i := firstword; i <= endword; i++ {
answer += popcount(bitmap[i])
}
return answer
}
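// selectBitPosition returns the position (0-63) of the j-th set bit of w,
// counting from the least significant bit, with j starting at 0.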
func selectBitPosition(w uint64, j int) int {
seen := 0
// Divide 64bit
part := w & 0xFFFFFFFF
n := popcount(part)
if n <= uint64(j) {
part = w >> 32
seen += 32
j -= int(n)
}
w = part
// Divide 32bit
part = w & 0xFFFF
n = popcount(part)
if n <= uint64(j) {
part = w >> 16
seen += 16
j -= int(n)
}
w = part
// Divide 16bit
part = w & 0xFF
n = popcount(part)
if n <= uint64(j) {
part = w >> 8
seen += 8
j -= int(n)
}
w = part
// Lookup in final byte
var counter uint
for counter = 0; counter < 8; counter++ {
j -= int((w >> counter) & 1)
if j < 0 {
break
}
}
return seen + int(counter)
}
func panicOn(err error) {
if err != nil {
panic(err)
}
}
type ph struct {
orig int
rand int
}
type pha []ph
func (p pha) Len() int { return len(p) }
func (p pha) Less(i, j int) bool { return p[i].rand < p[j].rand }
func (p pha) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func getRandomPermutation(n int) []int {
r := make([]ph, n)
for i := 0; i < n; i++ {
r[i].orig = i
r[i].rand = rand.Intn(1 << 29)
}
sort.Sort(pha(r))
m := make([]int, n)
for i := range m {
m[i] = r[i].orig
}
return m
}
func minOfInt(a, b int) int {
if a < b {
return a
}
return b
}
func maxOfInt(a, b int) int {
if a > b {
return a
}
return b
}
func maxOfUint16(a, b uint16) uint16 {
if a > b {
return a
}
return b
}
func minOfUint16(a, b uint16) uint16 {
if a < b {
return a
}
return b
}
func maxInt(a, b int) int {
if a > b {
return a
}
return b
}
func maxUint16(a, b uint16) uint16 {
if a > b {
return a
}
return b
}
func minUint16(a, b uint16) uint16 {
if a < b {
return a
}
return b
}

22
vendor/github.com/Smerity/govarint/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Stephen Merity
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

Some files were not shown because too many files have changed in this diff.