First split into crockery init and crockery run

The init command creates a crockery.db file containing the domain name and TLS keypair. The run command starts IMAP and SMTP services based on that file. Supporting only a single domain is starting to look a bit unnecessary. We'll see how that goes.

DESIGN.md: 21 additions
@@ -1,5 +1,7 @@
 # Crockery design
 
+## Overview
+
 We need:
 
 * SMTP server
@@ -18,4 +20,23 @@ We need:
 * Search emails - some sort of inverted index necessary
   * https://github.com/blevesearch/bleve ?
 * Embedded database best. Ideally we have a single file to work with
   * https://github.com/coreos/bbolt
+  * https://github.com/asdine/storm
+
+## Database structure
+
+```
+config/
+  domain: "example.com"
+domains/
+  example.com/
+    accounts/
+      foo@example.com/
+        config/
+        emails/
+    config/
+      key: []byte(...)
+      cert: []byte(...)
+
+
+```
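
Each path segment in the structure above is a nested storm/BoltDB bucket. A minimal sketch of how that layout is addressed through storm's nested nodes, mirroring the store changes later in this commit (the helper itself is hypothetical, not part of the commit):

```go
package store

import "github.com/asdine/storm"

// readDomainCert sketches how the layout above is addressed: "config" is a
// top-level key/value node holding the domain name, and each domain gets its
// own node tree under "domains".
func readDomainCert(db *storm.DB) ([]byte, error) {
	var domain string
	if err := db.Get("config", "domain", &domain); err != nil {
		return nil, err // config/domain
	}

	var certPEM []byte
	bucket := db.From("domains").From(domain)
	if err := bucket.Get("config", "cert", &certPEM); err != nil {
		return nil, err // domains/<domain>/config/cert
	}

	return certPEM, nil
}
```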

Gopkg.lock (generated): 33 changes
@@ -1,6 +1,25 @@
 # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
 
 
+[[projects]]
+  name = "github.com/asdine/storm"
+  packages = [
+    ".",
+    "codec",
+    "codec/json",
+    "index",
+    "internal",
+    "q"
+  ]
+  revision = "68fc73b635f890fe7ba2f3b15ce80c85b28a744f"
+  version = "v2.0.2"
+
+[[projects]]
+  name = "github.com/coreos/bbolt"
+  packages = ["."]
+  revision = "583e8937c61f1af6513608ccc75c97b6abdf4ff9"
+  version = "v1.3.0"
+
 [[projects]]
   name = "github.com/emersion/go-imap"
   packages = [
@@ -26,6 +45,12 @@
   packages = ["."]
   revision = "a63104657743890cb7c2fd54f15a2725291f6a9f"
 
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/sys"
+  packages = ["unix"]
+  revision = "dd2ff4accc098aceecb86b36eaa7829b2a17b1c9"
+
 [[projects]]
   name = "golang.org/x/text"
   packages = [
@@ -38,9 +63,15 @@
   revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
   version = "v0.3.0"
 
+[[projects]]
+  name = "gopkg.in/urfave/cli.v1"
+  packages = ["."]
+  revision = "cfb38830724cc34fedffe9a2a29fb54fa9169cd1"
+  version = "v1.20.0"
+
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "a8a093c114e9fd6d1de5e43c061f3cf1d08fa5279da70af9c0cd030030fe7737"
+  inputs-digest = "5b164a447403365e9da1c9b7f1a085a9442ff1afd241346f5d4b5c25c62bdac2"
   solver-name = "gps-cdcl"
   solver-version = 1

@@ -2,58 +2,145 @@ package main
 
 import (
 	"context"
-	"crypto/tls"
-	"flag"
+	"fmt"
+	"io/ioutil"
 	"log"
 	"os"
 	"os/signal"
 
+	"gopkg.in/urfave/cli.v1"
+
 	"ur.gs/crockery/internal/services"
 	"ur.gs/crockery/internal/store"
 )
 
-var (
-	domain   = flag.String("domain", "", "Domain to serve email for")
-	certFile = flag.String("cert", "", "Path to a PEM-encoded certificate bundle for TLS support")
-	keyFile  = flag.String("key", "", "Path to a PEM-encoded key for TLS support")
-)
-
 func main() {
-	flag.Parse()
-
-	if *domain == "" {
-		log.Fatal("A domain must be specified on the command line (for now!)")
+	app := cli.NewApp()
+	app.EnableBashCompletion = true
+	app.Name = "crockery"
+	app.Usage = "A vertically integrated, single-domain email system"
+	app.Authors = []cli.Author{
+		{Name: "Nick Thomas", Email: "crockery@ur.gs"},
 	}
 
+	app.Flags = []cli.Flag{
+		cli.StringFlag{
+			Name:   "db",
+			Usage:  "Database file to use",
+			EnvVar: "CROCKERY_DB",
+			Value:  "crockery.db",
+		},
+	}
+
+	app.Commands = []cli.Command{
+		{
+			Name:   "init",
+			Usage:  "Create a new Crockery database",
+			Action: crockeryInit,
+			Flags: []cli.Flag{
+				cli.StringFlag{
+					Name:   "domain",
+					Usage:  "Domain to serve email for",
+					EnvVar: "CROCKERY_DOMAIN",
+				},
+				cli.StringFlag{
+					Name:   "cert",
+					Usage:  "File containing the PEM-encoded certificate bundle for the domain",
+					EnvVar: "CROCKERY_CERT_FILE",
+				},
+
+				cli.StringFlag{
+					Name:   "key",
+					Usage:  "File containing the PEM-encoded private key for the domain",
+					EnvVar: "CROCKERY_KEY_FILE",
+				},
+			},
+		},
+		{
+			Name:   "run",
+			Usage:  "Run crockery",
+			Action: crockeryRun,
+		},
+	}
+
+	if err := app.Run(os.Args); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func crockeryInit(c *cli.Context) error {
+	db := c.GlobalString("db")
+	domain := c.String("domain")
+	certFile := c.String("cert")
+	keyFile := c.String("key")
+
+	if db == "" {
+		return fmt.Errorf("No crockery database file specified")
+	}
+
+	if domain == "" {
+		return fmt.Errorf("A domain must be specified")
+	}
+
+	// FIXME: we can make cert+key optional at some point and have them be
+	// generated on-demand via ACME during `crockery run` instead.
+	if certFile == "" {
+		return fmt.Errorf("A certificate file must be specified")
+	}
+
+	if keyFile == "" {
+		return fmt.Errorf("A key file must be specified")
+	}
+
+	if _, err := os.Stat(db); !os.IsNotExist(err) {
+		return fmt.Errorf("File %q already exists, refusing to overwrite", db)
+	}
+
+	certPEM, err := ioutil.ReadFile(certFile)
+	if err != nil {
+		return fmt.Errorf("Failed to read certificate from %q: %v", certFile, err)
+	}
+
+	keyPEM, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		return fmt.Errorf("Failed to read key from %q: %v", keyFile, err)
+	}
+
+	if err := store.Init(db, domain, certPEM, keyPEM); err != nil {
+		return fmt.Errorf("Couldn't initialize database %q: %v", db, err)
+	}
+
+	log.Printf("Created %q for domain %q. Next step:\n\t%s -db %s run", db, domain, os.Args[0], db)
+
+	return nil
+}
+
+func crockeryRun(c *cli.Context) error {
+	db := c.GlobalString("db")
 	ctx, cancel := context.WithCancel(context.Background())
 
-	datastore, err := store.New(ctx, "crockery.db")
+	if db == "" {
+		return fmt.Errorf("No crockery database file specified")
+	}
+
+	log.Printf("Loading config from file %q", db)
+
+	datastore, err := store.New(context.Background(), db)
 	if err != nil {
-		log.Fatal("Couldn't open crockery.db:", err)
+		return err
 	}
 
-	// FIXME: This will eventually come from the datastore itself, via `crockery init`
-	datastore.SetDomain(*domain)
-	log.Printf("Running as %s", datastore.Domain())
 	defer datastore.Close()
 
-	// FIXME: This will eventually come from the datastore itself, via ACME
-	if *certFile != "" || *keyFile != "" {
-		cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
-		if err != nil {
-			log.Fatalf("Couldn't setup TLS: %v", err)
-		}
-
-		if cert.PrivateKey == nil {
-			log.Fatal("No private key for TLS certificate")
-		}
-
-		datastore.SetTLS(cert)
-		log.Print("Successfully loaded TLS certificate and key")
+	if datastore.Domain() == "" {
+		return fmt.Errorf("No domain configured in file %q", db)
 	}
 
+	log.Printf("Starting crockery for domain %q", datastore.Domain())
+
 	srv, err := services.New(ctx, datastore)
 	if err != nil {
-		log.Fatal("Couldn't start services:", err)
+		return fmt.Errorf("Couldn't start services: %v", err)
 	}
 
 	sig := make(chan os.Signal, 1)
@@ -73,7 +160,8 @@ func main() {
 	}()
 
 	<-done
 
 	log.Println("All services finished, exiting")
 
-	os.Exit(0)
+	return nil
 }

@@ -3,6 +3,11 @@ package store
 import (
 	"context"
 	"crypto/tls"
+	"fmt"
 	"io"
+
+	"github.com/asdine/storm"
+	"github.com/coreos/bbolt"
 )
 
 type Interface interface {
@@ -10,20 +15,61 @@ type Interface interface {
 	TLS() tls.Certificate
 	TLSConfig() *tls.Config
 
-	SetDomain(string)
-	SetTLS(tls.Certificate)
+	SetDomain(string) error
+	SetTLS([]byte, []byte) error
 
 	io.Closer
 }
 
 func New(ctx context.Context, filename string) (Interface, error) {
-	return &concrete{
+	db, err := storm.Open(filename, storm.BoltOptions(0600, &bolt.Options{}))
+	if err != nil {
+		return nil, err
+	}
+
+	out := &concrete{
 		filename: filename,
-	}, nil
+		storm:    db,
+	}
+
+	if err := out.setup(); err != nil {
+		return nil, err
+	}
+
+	return out, nil
 }
 
+func Init(filename, domain string, certPEM, keyPEM []byte) error {
+	db, err := storm.Open(filename, storm.BoltOptions(0600, &bolt.Options{}))
+	if err != nil {
+		return err
+	}
+
+	builder := &concrete{
+		filename: filename,
+		storm:    db,
+	}
+
+	defer builder.Close()
+
+	if err := builder.SetDomain(domain); err != nil {
+		return fmt.Errorf("Couldn't set domain: %v", err)
+	}
+
+	if err := builder.SetTLS(certPEM, keyPEM); err != nil {
+		return fmt.Errorf("Couldn't set TLS: %v", err)
+	}
+
+	return nil
+}
+
 type concrete struct {
-	filename string
+	filename     string
+	storm        *storm.DB
+	domainBucket storm.Node
 
-	// TODO: these will eventually be persisted to the file in `filename`
+	// These are persisted in BoltDB, but we
 	// Might as well keep them in memory for the duration, though.
 	domain string
 	cert   tls.Certificate
 }
@@ -48,14 +94,82 @@ func (c *concrete) TLSConfig() *tls.Config {
 		}
 	}
 }
 
-func (c *concrete) SetDomain(domain string) {
+func (c *concrete) SetDomain(domain string) error {
+	if err := c.storm.Set("config", "domain", domain); err != nil {
+		return err
+	}
+
 	c.domain = domain
 
-	return
+	return nil
 }
 
-func (c *concrete) SetTLS(cert tls.Certificate) {
+func (c *concrete) SetTLS(certPEM, keyPEM []byte) error {
+	cert, err := tls.X509KeyPair(certPEM, keyPEM)
+	if err != nil {
+		return fmt.Errorf("Couldn't parse data as TLS keypair: %v", err)
+	}
+
+	if cert.PrivateKey == nil {
+		return fmt.Errorf("No private key for TLS certificate")
+	}
+
+	domainBucket := c.storm.From("domains").From(c.Domain())
+
+	tx, err := domainBucket.Begin(true)
+	if err != nil {
+		return err
+	}
+
+	if err := tx.Set("config", "cert", certPEM); err != nil {
+		return err
+	}
+
+	if err := tx.Set("config", "key", keyPEM); err != nil {
+		return err
+	}
+
+	if err := tx.Commit(); err != nil {
+		return err
+	}
+
 	c.cert = cert
 
-	return
+	return nil
 }
 
+func (c *concrete) Close() error {
+	return c.storm.Close()
+}
+
+// Bootstraps all cached values from storm
+func (c *concrete) setup() error {
+	var domain string
+	var keyPEM []byte
+	var certPEM []byte
+
+	if err := c.storm.Get("config", "domain", &domain); err != nil {
+		return fmt.Errorf("Couldn't read config/domain: %v", err)
+	}
+
+	domainBucket := c.storm.From("domains").From(domain)
+
+	if err := domainBucket.Get("config", "cert", &certPEM); err != nil {
+		return fmt.Errorf("Couldn't read domains/%s/config/cert: %v", domain, err)
+	}
+
+	if err := domainBucket.Get("config", "key", &keyPEM); err != nil {
+		return fmt.Errorf("Couldn't read domains/%s/config/key: %v", domain, err)
+	}
+
+	cert, err := tls.X509KeyPair(certPEM, keyPEM)
+	if err != nil {
+		return fmt.Errorf("Couldn't parse key and certificate: %v", err)
+	}
+
+	c.domainBucket = domainBucket
+	c.domain = domain
+	c.cert = cert
+
+	return nil
+}
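
With both halves in place, `crockery init` and `crockery run` round-trip through the same database file. A minimal sketch of that flow, using only the store API added above; this is an illustration rather than part of the commit, and the PEM file paths are hypothetical:

```go
package main

import (
	"context"
	"io/ioutil"
	"log"

	"ur.gs/crockery/internal/store"
)

func main() {
	// `crockery init`: read the PEM files once and persist them,
	// along with the domain, into a fresh crockery.db.
	certPEM, _ := ioutil.ReadFile("example.com.crt") // hypothetical paths
	keyPEM, _ := ioutil.ReadFile("example.com.key")
	if err := store.Init("crockery.db", "example.com", certPEM, keyPEM); err != nil {
		log.Fatal(err)
	}

	// `crockery run`: everything needed to serve comes back out of the file.
	datastore, err := store.New(context.Background(), "crockery.db")
	if err != nil {
		log.Fatal(err)
	}
	defer datastore.Close()

	log.Printf("domain: %s", datastore.Domain())
	_ = datastore.TLSConfig() // ready to hand to the IMAP/SMTP listeners
}
```

Keeping the domain and keypair inside the single BoltDB file means the run command needs no flags beyond `-db`, which is the point of the split.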

vendor/github.com/asdine/storm/.gitignore (generated, vendored, new file): 29 additions
@@ -0,0 +1,29 @@
# IDE
.idea/
.vscode/
*.iml

# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

vendor/github.com/asdine/storm/.travis.yml (generated, vendored, new file): 13 additions
@@ -0,0 +1,13 @@
language: go

before_install:
- go get github.com/stretchr/testify

go:
- 1.7
- 1.8
- 1.9
- tip

script:
- go test -v ./...

vendor/github.com/asdine/storm/LICENSE (generated, vendored, new file): 21 additions
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) [2017] [Asdine El Hrychy]

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/asdine/storm/README.md (generated, vendored, new file): 633 additions
@@ -0,0 +1,633 @@
# Storm

[](https://travis-ci.org/asdine/storm)
[](https://godoc.org/github.com/asdine/storm)
[](https://goreportcard.com/report/github.com/asdine/storm)

Storm is a simple and powerful toolkit for [BoltDB](https://github.com/coreos/bbolt). Basically, Storm provides indexes, a wide range of methods to store and fetch data, an advanced query system, and much more.

In addition to the examples below, see also the [examples in the GoDoc](https://godoc.org/github.com/asdine/storm#pkg-examples).

## Table of Contents

- [Getting Started](#getting-started)
- [Import Storm](#import-storm)
- [Open a database](#open-a-database)
- [Simple CRUD system](#simple-crud-system)
  - [Declare your structures](#declare-your-structures)
  - [Save your object](#save-your-object)
    - [Auto Increment](#auto-increment)
  - [Simple queries](#simple-queries)
    - [Fetch one object](#fetch-one-object)
    - [Fetch multiple objects](#fetch-multiple-objects)
    - [Fetch all objects](#fetch-all-objects)
    - [Fetch all objects sorted by index](#fetch-all-objects-sorted-by-index)
    - [Fetch a range of objects](#fetch-a-range-of-objects)
    - [Fetch objects by prefix](#fetch-objects-by-prefix)
    - [Skip, Limit and Reverse](#skip-limit-and-reverse)
    - [Delete an object](#delete-an-object)
    - [Update an object](#update-an-object)
    - [Initialize buckets and indexes before saving an object](#initialize-buckets-and-indexes-before-saving-an-object)
    - [Drop a bucket](#drop-a-bucket)
    - [Re-index a bucket](#re-index-a-bucket)
  - [Advanced queries](#advanced-queries)
  - [Transactions](#transactions)
  - [Options](#options)
    - [BoltOptions](#boltoptions)
    - [MarshalUnmarshaler](#marshalunmarshaler)
      - [Provided Codecs](#provided-codecs)
    - [Use existing Bolt connection](#use-existing-bolt-connection)
    - [Batch mode](#batch-mode)
- [Nodes and nested buckets](#nodes-and-nested-buckets)
  - [Node options](#node-options)
- [Simple Key/Value store](#simple-keyvalue-store)
- [BoltDB](#boltdb)
- [License](#license)
- [Credits](#credits)

## Getting Started

```bash
go get -u github.com/asdine/storm
```

## Import Storm

```go
import "github.com/asdine/storm"
```

## Open a database

Quick way of opening a database

```go
db, err := storm.Open("my.db")

defer db.Close()
```

`Open` can receive multiple options to customize the way it behaves. See [Options](#options) below

## Simple CRUD system

### Declare your structures

```go
type User struct {
	ID    int    // primary key
	Group string `storm:"index"` // this field will be indexed
	Email string `storm:"unique"` // this field will be indexed with a unique constraint
	Name  string // this field will not be indexed
	Age   int    `storm:"index"`
}
```

The primary key can be of any type as long as it is not a zero value. Storm will search for the tag `id`, if not present Storm will search for a field named `ID`.

```go
type User struct {
	ThePrimaryKey string `storm:"id"` // primary key
	Group         string `storm:"index"` // this field will be indexed
	Email         string `storm:"unique"` // this field will be indexed with a unique constraint
	Name          string // this field will not be indexed
}
```

Storm handles tags in nested structures with the `inline` tag

```go
type Base struct {
	Ident bson.ObjectId `storm:"id"`
}

type User struct {
	Base      `storm:"inline"`
	Group     string `storm:"index"`
	Email     string `storm:"unique"`
	Name      string
	CreatedAt time.Time `storm:"index"`
}
```

### Save your object

```go
user := User{
	ID:        10,
	Group:     "staff",
	Email:     "john@provider.com",
	Name:      "John",
	Age:       21,
	CreatedAt: time.Now(),
}

err := db.Save(&user)
// err == nil

user.ID++
err = db.Save(&user)
// err == storm.ErrAlreadyExists
```

That's it.

`Save` creates or updates all the required indexes and buckets, checks the unique constraints and saves the object to the store.

#### Auto Increment

Storm can auto increment integer values so you don't have to worry about that when saving your objects. Also, the new value is automatically inserted in your field.

```go

type Product struct {
	Pk                  int    `storm:"id,increment"` // primary key with auto increment
	Name                string
	IntegerField        uint64 `storm:"increment"`
	IndexedIntegerField uint32 `storm:"index,increment"`
	UniqueIntegerField  int16  `storm:"unique,increment=100"` // the starting value can be set
}

p := Product{Name: "Vacuum Cleaner"}

fmt.Println(p.Pk)
fmt.Println(p.IntegerField)
fmt.Println(p.IndexedIntegerField)
fmt.Println(p.UniqueIntegerField)
// 0
// 0
// 0
// 0

_ = db.Save(&p)

fmt.Println(p.Pk)
fmt.Println(p.IntegerField)
fmt.Println(p.IndexedIntegerField)
fmt.Println(p.UniqueIntegerField)
// 1
// 1
// 1
// 100

```

### Simple queries

Any object can be fetched, indexed or not. Storm uses indexes when available, otherwise it uses the [query system](#advanced-queries).

#### Fetch one object

```go
var user User
err := db.One("Email", "john@provider.com", &user)
// err == nil

err = db.One("Name", "John", &user)
// err == nil

err = db.One("Name", "Jack", &user)
// err == storm.ErrNotFound
```

#### Fetch multiple objects

```go
var users []User
err := db.Find("Group", "staff", &users)
```

#### Fetch all objects

```go
var users []User
err := db.All(&users)
```

#### Fetch all objects sorted by index

```go
var users []User
err := db.AllByIndex("CreatedAt", &users)
```

#### Fetch a range of objects

```go
var users []User
err := db.Range("Age", 10, 21, &users)
```

#### Fetch objects by prefix

```go
var users []User
err := db.Prefix("Name", "Jo", &users)
```

#### Skip, Limit and Reverse

```go
var users []User
err := db.Find("Group", "staff", &users, storm.Skip(10))
err = db.Find("Group", "staff", &users, storm.Limit(10))
err = db.Find("Group", "staff", &users, storm.Reverse())
err = db.Find("Group", "staff", &users, storm.Limit(10), storm.Skip(10), storm.Reverse())

err = db.All(&users, storm.Limit(10), storm.Skip(10), storm.Reverse())
err = db.AllByIndex("CreatedAt", &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
err = db.Range("Age", 10, 21, &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
```

#### Delete an object

```go
err := db.DeleteStruct(&user)
```

#### Update an object

```go
// Update multiple fields
err := db.Update(&User{ID: 10, Name: "Jack", Age: 45})

// Update a single field
err := db.UpdateField(&User{ID: 10}, "Age", 0)
```

#### Initialize buckets and indexes before saving an object

```go
err := db.Init(&User{})
```

Useful when starting your application

#### Drop a bucket

Using the struct

```go
err := db.Drop(&User{})
```

Using the bucket name

```go
err := db.Drop("User")
```

#### Re-index a bucket

```go
err := db.ReIndex(&User{})
```

Useful when the structure has changed

### Advanced queries

For more complex queries, you can use the `Select` method.
`Select` takes any number of [`Matcher`](https://godoc.org/github.com/asdine/storm/q#Matcher) from the [`q`](https://godoc.org/github.com/asdine/storm/q) package.

Here are some common Matchers:

```go
// Equality
q.Eq("Name", John)

// Strictly greater than
q.Gt("Age", 7)

// Lesser than or equal to
q.Lte("Age", 77)

// Regex with name that starts with the letter D
q.Re("Name", "^D")

// In the given slice of values
q.In("Group", []string{"Staff", "Admin"})
```

Matchers can also be combined with `And`, `Or` and `Not`:

```go

// Match if all match
q.And(
	q.Gt("Age", 7),
	q.Re("Name", "^D"),
)

// Match if one matches
q.Or(
	q.Re("Name", "^A"),
	q.Not(
		q.Re("Name", "^B"),
	),
	q.Re("Name", "^C"),
	q.In("Group", []string{"Staff", "Admin"}),
	q.And(
		q.StrictEq("Password", []byte(password)),
		q.Eq("Registered", true),
	),
)
```

You can find the complete list in the [documentation](https://godoc.org/github.com/asdine/storm/q#Matcher).

`Select` takes any number of matchers and wraps them into a `q.And()` so it's not necessary to specify it. It returns a [`Query`](https://godoc.org/github.com/asdine/storm#Query) type.

```go
query := db.Select(q.Gte("Age", 7), q.Lte("Age", 77))
```

The `Query` type contains methods to filter and order the records.

```go
// Limit
query = query.Limit(10)

// Skip
query = query.Skip(20)

// Calls can also be chained
query = query.Limit(10).Skip(20).OrderBy("Age").Reverse()
```

But also to specify how to fetch them.

```go
var users []User
err = query.Find(&users)

var user User
err = query.First(&user)
```

Examples with `Select`:

```go
// Find all users with an ID between 10 and 100
err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Find(&users)

// Nested matchers
err = db.Select(q.Or(
	q.Gt("ID", 50),
	q.Lt("Age", 21),
	q.And(
		q.Eq("Group", "admin"),
		q.Gte("Age", 21),
	),
)).Find(&users)

query := db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age", "Name")

// Find multiple records
err = query.Find(&users)
// or
err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age", "Name").Find(&users)

// Find first record
err = query.First(&user)
// or
err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age", "Name").First(&user)

// Delete all matching records
err = query.Delete(new(User))

// Fetching records one by one (useful when the bucket contains a lot of records)
query = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).OrderBy("Age", "Name")

err = query.Each(new(User), func(record interface{}) error {
	u := record.(*User)
	...
	return nil
})
```

See the [documentation](https://godoc.org/github.com/asdine/storm#Query) for a complete list of methods.

### Transactions

```go
tx, err := db.Begin(true)
if err != nil {
	return err
}
defer tx.Rollback()

accountA.Amount -= 100
accountB.Amount += 100

err = tx.Save(accountA)
if err != nil {
	return err
}

err = tx.Save(accountB)
if err != nil {
	return err
}

return tx.Commit()
```

### Options

Storm options are functions that can be passed when constructing your Storm instance. You can pass it any number of options.

#### BoltOptions

By default, Storm opens a database with the mode `0600` and a timeout of one second.
You can change this behavior by using `BoltOptions`

```go
db, err := storm.Open("my.db", storm.BoltOptions(0600, &bolt.Options{Timeout: 1 * time.Second}))
```

#### MarshalUnmarshaler

To store the data in BoltDB, Storm marshals it in JSON by default. If you wish to change this behavior you can pass a codec that implements [`codec.MarshalUnmarshaler`](https://godoc.org/github.com/asdine/storm/codec#MarshalUnmarshaler) via the [`storm.Codec`](https://godoc.org/github.com/asdine/storm#Codec) option:

```go
db, _ := storm.Open("my.db", storm.Codec(myCodec))
```

##### Provided Codecs

You can easily implement your own `MarshalUnmarshaler`, but Storm comes with built-in support for [JSON](https://godoc.org/github.com/asdine/storm/codec/json) (default), [GOB](https://godoc.org/github.com/asdine/storm/codec/gob), [Sereal](https://godoc.org/github.com/asdine/storm/codec/sereal), [Protocol Buffers](https://godoc.org/github.com/asdine/storm/codec/protobuf) and [MessagePack](https://godoc.org/github.com/asdine/storm/codec/msgpack).

These can be used by importing the relevant package and using that codec to configure Storm. The example below shows all variants (without proper error handling):

```go
import (
	"github.com/asdine/storm"
	"github.com/asdine/storm/codec/gob"
	"github.com/asdine/storm/codec/json"
	"github.com/asdine/storm/codec/sereal"
	"github.com/asdine/storm/codec/protobuf"
	"github.com/asdine/storm/codec/msgpack"
)

var gobDb, _ = storm.Open("gob.db", storm.Codec(gob.Codec))
var jsonDb, _ = storm.Open("json.db", storm.Codec(json.Codec))
var serealDb, _ = storm.Open("sereal.db", storm.Codec(sereal.Codec))
var protobufDb, _ = storm.Open("protobuf.db", storm.Codec(protobuf.Codec))
var msgpackDb, _ = storm.Open("msgpack.db", storm.Codec(msgpack.Codec))
```

**Tip**: Adding Storm tags to generated Protobuf files can be tricky. A good solution is to use [this tool](https://github.com/favadi/protoc-go-inject-tag) to inject the tags during the compilation.

#### Use existing Bolt connection

You can use an existing connection and pass it to Storm

```go
bDB, _ := bolt.Open(filepath.Join(dir, "bolt.db"), 0600, &bolt.Options{Timeout: 10 * time.Second})
db, _ := storm.Open("my.db", storm.UseDB(bDB))
```

#### Batch mode

Batch mode can be enabled to speed up concurrent writes (see [Batch read-write transactions](https://github.com/coreos/bbolt#batch-read-write-transactions))

```go
db, _ := storm.Open("my.db", storm.Batch())
```

## Nodes and nested buckets

Storm takes advantage of BoltDB's nested buckets feature by using `storm.Node`.
A `storm.Node` is the underlying object used by `storm.DB` to manipulate a bucket.
To create a nested bucket and use the same API as `storm.DB`, you can use the `DB.From` method.

```go
repo := db.From("repo")

err := repo.Save(&Issue{
	Title:  "I want more features",
	Author: user.ID,
})

err = repo.Save(newRelease("0.10"))

var issues []Issue
err = repo.Find("Author", user.ID, &issues)

var release Release
err = repo.One("Tag", "0.10", &release)
```

You can also chain the nodes to create a hierarchy

```go
chars := db.From("characters")
heroes := chars.From("heroes")
enemies := chars.From("enemies")

items := db.From("items")
potions := items.From("consumables").From("medicine").From("potions")
```

You can even pass the entire hierarchy as arguments to `From`:

```go
privateNotes := db.From("notes", "private")
workNotes := db.From("notes", "work")
```

### Node options

A Node can also be configured. Activating an option on a Node creates a copy, so a Node is always thread-safe.

```go
n := db.From("my-node")
```

Give a bolt.Tx transaction to the Node

```go
n = n.WithTransaction(tx)
```

Enable batch mode

```go
n = n.WithBatch(true)
```

Use a Codec

```go
n = n.WithCodec(gob.Codec)
```

## Simple Key/Value store

Storm can be used as a simple, robust, key/value store that can store anything.
The key and the value can be of any type as long as the key is not a zero value.

Saving data:

```go
db.Set("logs", time.Now(), "I'm eating my breakfast man")
db.Set("sessions", bson.NewObjectId(), &someUser)
db.Set("weird storage", "754-3010", map[string]interface{}{
	"hair":  "blonde",
	"likes": []string{"cheese", "star wars"},
})
```

Fetching data:

```go
user := User{}
db.Get("sessions", someObjectId, &user)

var details map[string]interface{}
db.Get("weird storage", "754-3010", &details)

db.Get("sessions", someObjectId, &details)
```

Deleting data:

```go
db.Delete("sessions", someObjectId)
db.Delete("weird storage", "754-3010")
```

## BoltDB

BoltDB is still easily accessible and can be used as usual

```go
db.Bolt.View(func(tx *bolt.Tx) error {
	bucket := tx.Bucket([]byte("my bucket"))
	val := bucket.Get([]byte("any id"))
	fmt.Println(string(val))
	return nil
})
```

A transaction can also be passed to Storm

```go
db.Bolt.Update(func(tx *bolt.Tx) error {
	...
	dbx := db.WithTransaction(tx)
	err = dbx.Save(&user)
	...
	return nil
})
```

## License

MIT

## Credits

- [Asdine El Hrychy](https://github.com/asdine)
- [Bjørn Erik Pedersen](https://github.com/bep)

vendor/github.com/asdine/storm/bucket.go (generated, vendored, new file): 47 additions
@@ -0,0 +1,47 @@
package storm

import "github.com/coreos/bbolt"

// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist.
func (n *node) CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error) {
	var b *bolt.Bucket
	var err error

	bucketNames := append(n.rootBucket, bucket)

	for _, bucketName := range bucketNames {
		if b != nil {
			if b, err = b.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
				return nil, err
			}

		} else {
			if b, err = tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
				return nil, err
			}
		}
	}

	return b, nil
}

// GetBucket returns the given bucket below the current node.
func (n *node) GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket {
	var b *bolt.Bucket

	bucketNames := append(n.rootBucket, children...)
	for _, bucketName := range bucketNames {
		if b != nil {
			if b = b.Bucket([]byte(bucketName)); b == nil {
				return nil
			}
		} else {
			if b = tx.Bucket([]byte(bucketName)); b == nil {
				return nil
			}
		}
	}

	return b
}

vendor/github.com/asdine/storm/codec/.gitignore (generated, vendored, new file): 1 addition
@@ -0,0 +1 @@
*.db

vendor/github.com/asdine/storm/codec/codec.go (generated, vendored, new file): 11 additions
@@ -0,0 +1,11 @@
// Package codec contains sub-packages with different codecs that can be used
// to encode and decode entities in Storm.
package codec

// MarshalUnmarshaler represents a codec used to marshal and unmarshal entities.
type MarshalUnmarshaler interface {
	Marshal(v interface{}) ([]byte, error)
	Unmarshal(b []byte, v interface{}) error
	// name of this codec
	Name() string
}

vendor/github.com/asdine/storm/codec/json/json.go (generated, vendored, new file): 25 additions
@@ -0,0 +1,25 @@
// Package json contains a codec to encode and decode entities in JSON format
package json

import (
	"encoding/json"
)

const name = "json"

// Codec that encodes to and decodes from JSON.
var Codec = new(jsonCodec)

type jsonCodec int

func (j jsonCodec) Marshal(v interface{}) ([]byte, error) {
	return json.Marshal(v)
}

func (j jsonCodec) Unmarshal(b []byte, v interface{}) error {
	return json.Unmarshal(b, v)
}

func (j jsonCodec) Name() string {
	return name
}

vendor/github.com/asdine/storm/errors.go (generated, vendored, new file): 51 additions
@@ -0,0 +1,51 @@
package storm

import "errors"

// Errors
var (
	// ErrNoID is returned when no ID field or id tag is found in the struct.
	ErrNoID = errors.New("missing struct tag id or ID field")

	// ErrZeroID is returned when the ID field is a zero value.
	ErrZeroID = errors.New("id field must not be a zero value")

	// ErrBadType is returned when a method receives an unexpected value type.
	ErrBadType = errors.New("provided data must be a struct or a pointer to struct")

	// ErrAlreadyExists is returned when trying to set an existing value on a field that has a unique index.
	ErrAlreadyExists = errors.New("already exists")

	// ErrNilParam is returned when the specified param is expected to be not nil.
	ErrNilParam = errors.New("param must not be nil")

	// ErrUnknownTag is returned when an unexpected tag is specified.
	ErrUnknownTag = errors.New("unknown tag")

	// ErrIdxNotFound is returned when the specified index is not found.
	ErrIdxNotFound = errors.New("index not found")

	// ErrSlicePtrNeeded is returned when an unexpected value is given, instead of a pointer to slice.
	ErrSlicePtrNeeded = errors.New("provided target must be a pointer to slice")

	// ErrStructPtrNeeded is returned when an unexpected value is given, instead of a pointer to struct.
	ErrStructPtrNeeded = errors.New("provided target must be a pointer to struct")

	// ErrPtrNeeded is returned when an unexpected value is given, instead of a pointer.
	ErrPtrNeeded = errors.New("provided target must be a pointer to a valid variable")

	// ErrNoName is returned when the specified struct has no name.
	ErrNoName = errors.New("provided target must have a name")

	// ErrNotFound is returned when the specified record is not saved in the bucket.
	ErrNotFound = errors.New("not found")

	// ErrNotInTransaction is returned when trying to rollback or commit when not in transaction.
	ErrNotInTransaction = errors.New("not in transaction")

	// ErrIncompatibleValue is returned when trying to set a value with a different type than the chosen field
	ErrIncompatibleValue = errors.New("incompatible value")

	// ErrDifferentCodec is returned when using a codec different than the first codec used with the bucket.
	ErrDifferentCodec = errors.New("the selected codec is incompatible with this bucket")
)

vendor/github.com/asdine/storm/extract.go (generated, vendored, new file): 226 additions
@@ -0,0 +1,226 @@
package storm

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"

	"github.com/asdine/storm/index"
	"github.com/coreos/bbolt"
)

// Storm tags
const (
	tagID        = "id"
	tagIdx       = "index"
	tagUniqueIdx = "unique"
	tagInline    = "inline"
	tagIncrement = "increment"
	indexPrefix  = "__storm_index_"
)

type fieldConfig struct {
	Name           string
	Index          string
	IsZero         bool
	IsID           bool
	Increment      bool
	IncrementStart int64
	IsInteger      bool
	Value          *reflect.Value
	ForceUpdate    bool
}

// structConfig is a structure gathering all the relevant information about a model
type structConfig struct {
	Name   string
	Fields map[string]*fieldConfig
	ID     *fieldConfig
}

func extract(s *reflect.Value, mi ...*structConfig) (*structConfig, error) {
	if s.Kind() == reflect.Ptr {
		e := s.Elem()
		s = &e
	}
	if s.Kind() != reflect.Struct {
		return nil, ErrBadType
	}

	typ := s.Type()

	var child bool

	var m *structConfig
	if len(mi) > 0 {
		m = mi[0]
		child = true
	} else {
		m = &structConfig{}
		m.Fields = make(map[string]*fieldConfig)
	}

	if m.Name == "" {
		m.Name = typ.Name()
	}

	numFields := s.NumField()
	for i := 0; i < numFields; i++ {
		field := typ.Field(i)
		value := s.Field(i)

		if field.PkgPath != "" {
			continue
		}

		err := extractField(&value, &field, m, child)
		if err != nil {
			return nil, err
		}
	}

	if child {
		return m, nil
	}

	if m.ID == nil {
		return nil, ErrNoID
	}

	if m.Name == "" {
		return nil, ErrNoName
	}

	return m, nil
}

func extractField(value *reflect.Value, field *reflect.StructField, m *structConfig, isChild bool) error {
	var f *fieldConfig
	var err error

	tag := field.Tag.Get("storm")
	if tag != "" {
		f = &fieldConfig{
			Name:           field.Name,
			IsZero:         isZero(value),
			IsInteger:      isInteger(value),
			Value:          value,
			IncrementStart: 1,
		}

		tags := strings.Split(tag, ",")

		for _, tag := range tags {
			switch tag {
			case "id":
				f.IsID = true
				f.Index = tagUniqueIdx
			case tagUniqueIdx, tagIdx:
				f.Index = tag
			case tagInline:
				if value.Kind() == reflect.Ptr {
					e := value.Elem()
					value = &e
				}
				if value.Kind() == reflect.Struct {
					a := value.Addr()
					_, err := extract(&a, m)
					if err != nil {
						return err
					}
				}
				// we don't need to save this field
				return nil
			default:
				if strings.HasPrefix(tag, tagIncrement) {
					f.Increment = true
					parts := strings.Split(tag, "=")
					if parts[0] != tagIncrement {
						return ErrUnknownTag
					}
					if len(parts) > 1 {
						f.IncrementStart, err = strconv.ParseInt(parts[1], 0, 64)
						if err != nil {
							return err
						}
					}
				} else {
					return ErrUnknownTag
				}
			}
		}

		if _, ok := m.Fields[f.Name]; !ok || !isChild {
			m.Fields[f.Name] = f
		}
	}

	if m.ID == nil && f != nil && f.IsID {
		m.ID = f
	}

	// the field is named ID and no ID field has been detected before
	if m.ID == nil && field.Name == "ID" {
		if f == nil {
			f = &fieldConfig{
				Index:          tagUniqueIdx,
				Name:           field.Name,
				IsZero:         isZero(value),
				IsInteger:      isInteger(value),
				IsID:           true,
				Value:          value,
				IncrementStart: 1,
			}
			m.Fields[field.Name] = f
		}
		m.ID = f
	}

	return nil
}

func extractSingleField(ref *reflect.Value, fieldName string) (*structConfig, error) {
	var cfg structConfig
	cfg.Fields = make(map[string]*fieldConfig)

	f, ok := ref.Type().FieldByName(fieldName)
	if !ok || f.PkgPath != "" {
		return nil, fmt.Errorf("field %s not found", fieldName)
	}

	v := ref.FieldByName(fieldName)
	err := extractField(&v, &f, &cfg, false)
	if err != nil {
		return nil, err
	}

	return &cfg, nil
}

func getIndex(bucket *bolt.Bucket, idxKind string, fieldName string) (index.Index, error) {
	var idx index.Index
	var err error

	switch idxKind {
	case tagUniqueIdx:
		idx, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
	case tagIdx:
		idx, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
	default:
		err = ErrIdxNotFound
	}

	return idx, err
}

func isZero(v *reflect.Value) bool {
	zero := reflect.Zero(v.Type()).Interface()
	current := v.Interface()
	return reflect.DeepEqual(current, zero)
}

func isInteger(v *reflect.Value) bool {
	kind := v.Kind()
	return v != nil && kind >= reflect.Int && kind <= reflect.Uint64
}
499
vendor/github.com/asdine/storm/finder.go
generated
vendored
Normal file
499
vendor/github.com/asdine/storm/finder.go
generated
vendored
Normal file
@@ -0,0 +1,499 @@
|
||||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/asdine/storm/index"
|
||||
"github.com/asdine/storm/q"
|
||||
"github.com/coreos/bbolt"
|
||||
)
|
||||
|
||||
// A Finder can fetch types from BoltDB.
|
||||
type Finder interface {
|
||||
// One returns one record by the specified index
|
||||
One(fieldName string, value interface{}, to interface{}) error
|
||||
|
||||
// Find returns one or more records by the specified index
|
||||
Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error
|
||||
|
||||
// AllByIndex gets all the records of a bucket that are indexed in the specified index
|
||||
AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error
|
||||
|
||||
// All gets all the records of a bucket.
|
||||
// If there are no records it returns no error and the 'to' parameter is set to an empty slice.
|
||||
All(to interface{}, options ...func(*index.Options)) error
|
||||
|
||||
// Select a list of records that match a list of matchers. Doesn't use indexes.
|
||||
Select(matchers ...q.Matcher) Query
|
||||
|
||||
// Range returns one or more records by the specified index within the specified range
|
||||
Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error
|
||||
|
||||
// Prefix returns one or more records whose given field starts with the specified prefix.
|
||||
Prefix(fieldName string, prefix string, to interface{}, options ...func(*index.Options)) error
|
||||
|
||||
// Count counts all the records of a bucket
|
||||
Count(data interface{}) (int, error)
|
||||
}
|
||||
|
||||
// One returns one record by the specified index
|
||||
func (n *node) One(fieldName string, value interface{}, to interface{}) error {
|
||||
sink, err := newFirstSink(n, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucketName := sink.bucketName()
|
||||
if bucketName == "" {
|
||||
return ErrNoName
|
||||
}
|
||||
|
||||
if fieldName == "" {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
ref := reflect.Indirect(sink.ref)
|
||||
cfg, err := extractSingleField(&ref, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
field, ok := cfg.Fields[fieldName]
|
||||
if !ok || (!field.IsID && field.Index == "") {
|
||||
query := newQuery(n, q.StrictEq(fieldName, value))
|
||||
query.Limit(1)
|
||||
|
||||
if n.tx != nil {
|
||||
err = query.query(n.tx, sink)
|
||||
} else {
|
||||
err = n.s.Bolt.View(func(tx *bolt.Tx) error {
|
||||
return query.query(tx, sink)
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sink.flush()
|
||||
}
|
||||
|
||||
val, err := toBytes(value, n.codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return n.readTx(func(tx *bolt.Tx) error {
|
||||
return n.one(tx, bucketName, fieldName, cfg, to, val, field.IsID)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *node) one(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, to interface{}, val []byte, skipIndex bool) error {
|
||||
bucket := n.GetBucket(tx, bucketName)
|
||||
if bucket == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
var id []byte
|
||||
if !skipIndex {
|
||||
idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
|
||||
if err != nil {
|
||||
if err == index.ErrNotFound {
|
||||
return ErrNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
id = idx.Get(val)
|
||||
} else {
|
||||
id = val
|
||||
}
|
||||
|
||||
if id == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
raw := bucket.Get(id)
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
return n.codec.Unmarshal(raw, to)
|
||||
}
|
||||
|
||||
// Find returns one or more records by the specified index
|
||||
func (n *node) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
|
||||
sink, err := newListSink(n, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bucketName := sink.bucketName()
|
||||
if bucketName == "" {
|
||||
return ErrNoName
|
||||
}
|
||||
|
||||
ref := reflect.Indirect(reflect.New(sink.elemType))
|
||||
cfg, err := extractSingleField(&ref, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := index.NewOptions()
|
||||
for _, fn := range options {
|
||||
fn(opts)
|
||||
}
|
||||
|
||||
field, ok := cfg.Fields[fieldName]
|
||||
if !ok || (!field.IsID && (field.Index == "" || value == nil)) {
|
||||
query := newQuery(n, q.Eq(fieldName, value))
|
||||
query.Skip(opts.Skip).Limit(opts.Limit)
|
||||
|
||||
if opts.Reverse {
|
||||
query.Reverse()
|
||||
}
|
||||
|
||||
err = n.readTx(func(tx *bolt.Tx) error {
|
||||
return query.query(tx, sink)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sink.flush()
|
||||
}
|
||||
|
||||
val, err := toBytes(value, n.codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return n.readTx(func(tx *bolt.Tx) error {
|
||||
return n.find(tx, bucketName, fieldName, cfg, sink, val, opts)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *node) find(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, val []byte, opts *index.Options) error {
|
||||
bucket := n.GetBucket(tx, bucketName)
|
||||
if bucket == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
list, err := idx.All(val, opts)
|
||||
if err != nil {
|
||||
if err == index.ErrNotFound {
|
||||
return ErrNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
|
||||
|
||||
sorter := newSorter(n, sink)
|
||||
for i := range list {
|
||||
raw := bucket.Get(list[i])
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
if _, err := sorter.filter(nil, bucket, list[i], raw); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return sorter.flush()
|
||||
}
|
||||
|
||||
// AllByIndex gets all the records of a bucket that are indexed in the specified index
|
||||
func (n *node) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
|
||||
if fieldName == "" {
|
||||
return n.All(to, options...)
|
||||
}
|
||||
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Slice {
|
||||
return ErrSlicePtrNeeded
|
||||
}
|
||||
|
||||
typ := reflect.Indirect(ref).Type().Elem()
|
||||
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
}
|
||||
|
||||
newElem := reflect.New(typ)
|
||||
|
||||
cfg, err := extract(&newElem)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cfg.ID.Name == fieldName {
|
||||
return n.All(to, options...)
|
||||
}
|
||||
|
||||
opts := index.NewOptions()
|
||||
for _, fn := range options {
|
||||
fn(opts)
|
||||
}
|
||||
|
||||
return n.readTx(func(tx *bolt.Tx) error {
|
||||
return n.allByIndex(tx, fieldName, cfg, &ref, opts)
|
||||
})
|
||||
}

func (n *node) allByIndex(tx *bolt.Tx, fieldName string, cfg *structConfig, ref *reflect.Value, opts *index.Options) error {
	bucket := n.GetBucket(tx, cfg.Name)
	if bucket == nil {
		return ErrNotFound
	}

	fieldCfg, ok := cfg.Fields[fieldName]
	if !ok {
		return ErrNotFound
	}

	idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
	if err != nil {
		return err
	}

	list, err := idx.AllRecords(opts)
	if err != nil {
		if err == index.ErrNotFound {
			return ErrNotFound
		}
		return err
	}

	results := reflect.MakeSlice(reflect.Indirect(*ref).Type(), len(list), len(list))

	for i := range list {
		raw := bucket.Get(list[i])
		if raw == nil {
			return ErrNotFound
		}

		err = n.codec.Unmarshal(raw, results.Index(i).Addr().Interface())
		if err != nil {
			return err
		}
	}

	reflect.Indirect(*ref).Set(results)
	return nil
}

// All gets all the records of a bucket.
// If there are no records it returns no error and the 'to' parameter is set to an empty slice.
func (n *node) All(to interface{}, options ...func(*index.Options)) error {
	opts := index.NewOptions()
	for _, fn := range options {
		fn(opts)
	}

	query := newQuery(n, nil).Limit(opts.Limit).Skip(opts.Skip)
	if opts.Reverse {
		query.Reverse()
	}

	err := query.Find(to)
	if err != nil && err != ErrNotFound {
		return err
	}

	if err == ErrNotFound {
		ref := reflect.ValueOf(to)
		results := reflect.MakeSlice(reflect.Indirect(ref).Type(), 0, 0)
		reflect.Indirect(ref).Set(results)
	}
	return nil
}

// Range returns one or more records by the specified index within the specified range
func (n *node) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
	sink, err := newListSink(n, to)
	if err != nil {
		return err
	}

	bucketName := sink.bucketName()
	if bucketName == "" {
		return ErrNoName
	}

	ref := reflect.Indirect(reflect.New(sink.elemType))
	cfg, err := extractSingleField(&ref, fieldName)
	if err != nil {
		return err
	}

	opts := index.NewOptions()
	for _, fn := range options {
		fn(opts)
	}

	field, ok := cfg.Fields[fieldName]
	if !ok || (!field.IsID && field.Index == "") {
		query := newQuery(n, q.And(q.Gte(fieldName, min), q.Lte(fieldName, max)))
		query.Skip(opts.Skip).Limit(opts.Limit)

		if opts.Reverse {
			query.Reverse()
		}

		err = n.readTx(func(tx *bolt.Tx) error {
			return query.query(tx, sink)
		})

		if err != nil {
			return err
		}

		return sink.flush()
	}

	mn, err := toBytes(min, n.codec)
	if err != nil {
		return err
	}

	mx, err := toBytes(max, n.codec)
	if err != nil {
		return err
	}

	return n.readTx(func(tx *bolt.Tx) error {
		return n.rnge(tx, bucketName, fieldName, cfg, sink, mn, mx, opts)
	})
}

func (n *node) rnge(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, min, max []byte, opts *index.Options) error {
	bucket := n.GetBucket(tx, bucketName)
	if bucket == nil {
		reflect.Indirect(sink.ref).SetLen(0)
		return nil
	}

	idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
	if err != nil {
		return err
	}

	list, err := idx.Range(min, max, opts)
	if err != nil {
		return err
	}

	sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
	sorter := newSorter(n, sink)
	for i := range list {
		raw := bucket.Get(list[i])
		if raw == nil {
			return ErrNotFound
		}

		if _, err := sorter.filter(nil, bucket, list[i], raw); err != nil {
			return err
		}
	}

	return sorter.flush()
}

// Prefix returns one or more records whose given field starts with the specified prefix.
func (n *node) Prefix(fieldName string, prefix string, to interface{}, options ...func(*index.Options)) error {
	sink, err := newListSink(n, to)
	if err != nil {
		return err
	}

	bucketName := sink.bucketName()
	if bucketName == "" {
		return ErrNoName
	}

	ref := reflect.Indirect(reflect.New(sink.elemType))
	cfg, err := extractSingleField(&ref, fieldName)
	if err != nil {
		return err
	}

	opts := index.NewOptions()
	for _, fn := range options {
		fn(opts)
	}

	field, ok := cfg.Fields[fieldName]
	if !ok || (!field.IsID && field.Index == "") {
		query := newQuery(n, q.Re(fieldName, fmt.Sprintf("^%s", prefix)))
		query.Skip(opts.Skip).Limit(opts.Limit)

		if opts.Reverse {
			query.Reverse()
		}

		err = n.readTx(func(tx *bolt.Tx) error {
			return query.query(tx, sink)
		})

		if err != nil {
			return err
		}

		return sink.flush()
	}

	prfx, err := toBytes(prefix, n.codec)
	if err != nil {
		return err
	}

	return n.readTx(func(tx *bolt.Tx) error {
		return n.prefix(tx, bucketName, fieldName, cfg, sink, prfx, opts)
	})
}

func (n *node) prefix(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, prefix []byte, opts *index.Options) error {
	bucket := n.GetBucket(tx, bucketName)
	if bucket == nil {
		reflect.Indirect(sink.ref).SetLen(0)
		return nil
	}

	idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
	if err != nil {
		return err
	}

	list, err := idx.Prefix(prefix, opts)
	if err != nil {
		return err
	}

	sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
	sorter := newSorter(n, sink)
	for i := range list {
		raw := bucket.Get(list[i])
		if raw == nil {
			return ErrNotFound
		}

		if _, err := sorter.filter(nil, bucket, list[i], raw); err != nil {
			return err
		}
	}

	return sorter.flush()
}

// Count counts all the records of a bucket
func (n *node) Count(data interface{}) (int, error) {
	return n.Select().Count(data)
}
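These finders are what crockery reaches through storm's public API. A minimal sketch of that usage follows; it is illustrative only and not part of the vendored code — the Account type, its fields, and the crockery.db path are hypothetical stand-ins:

package main

import (
	"log"

	"github.com/asdine/storm"
)

// Account is a hypothetical record type; crockery's real schema may differ.
type Account struct {
	ID     int    `storm:"id,increment"`
	Email  string `storm:"unique"`
	Domain string `storm:"index"`
}

func main() {
	db, err := storm.Open("crockery.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// All decodes every record in the Account bucket into the slice;
	// an empty bucket yields an empty slice, not an error.
	var all []Account
	if err := db.All(&all); err != nil {
		log.Fatal(err)
	}

	// Prefix walks the Domain index instead of scanning the whole bucket.
	var some []Account
	if err := db.Prefix("Domain", "example.", &some); err != nil && err != storm.ErrNotFound {
		log.Fatal(err)
	}
}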
14 vendor/github.com/asdine/storm/index/errors.go generated vendored Normal file
@@ -0,0 +1,14 @@
package index

import "errors"

var (
	// ErrNotFound is returned when the specified record is not saved in the bucket.
	ErrNotFound = errors.New("not found")

	// ErrAlreadyExists is returned when trying to set an existing value on a field that has a unique index.
	ErrAlreadyExists = errors.New("already exists")

	// ErrNilParam is returned when the specified param is expected to be not nil.
	ErrNilParam = errors.New("param must not be nil")
)
14 vendor/github.com/asdine/storm/index/indexes.go generated vendored Normal file
@@ -0,0 +1,14 @@
// Package index contains Index engines used to store values and their corresponding IDs
package index

// Index interface
type Index interface {
	Add(value []byte, targetID []byte) error
	Remove(value []byte) error
	RemoveID(id []byte) error
	Get(value []byte) []byte
	All(value []byte, opts *Options) ([][]byte, error)
	AllRecords(opts *Options) ([][]byte, error)
	Range(min []byte, max []byte, opts *Options) ([][]byte, error)
	Prefix(prefix []byte, opts *Options) ([][]byte, error)
}
283 vendor/github.com/asdine/storm/index/list.go generated vendored Normal file
@@ -0,0 +1,283 @@
package index

import (
	"bytes"

	"github.com/asdine/storm/internal"
	"github.com/coreos/bbolt"
)

// NewListIndex loads a ListIndex
func NewListIndex(parent *bolt.Bucket, indexName []byte) (*ListIndex, error) {
	var err error
	b := parent.Bucket(indexName)
	if b == nil {
		if !parent.Writable() {
			return nil, ErrNotFound
		}
		b, err = parent.CreateBucket(indexName)
		if err != nil {
			return nil, err
		}
	}

	ids, err := NewUniqueIndex(b, []byte("storm__ids"))
	if err != nil {
		return nil, err
	}

	return &ListIndex{
		IndexBucket: b,
		Parent:      parent,
		IDs:         ids,
	}, nil
}

// ListIndex is an index that references values and the corresponding IDs.
type ListIndex struct {
	Parent      *bolt.Bucket
	IndexBucket *bolt.Bucket
	IDs         *UniqueIndex
}

// Add a value to the list index
func (idx *ListIndex) Add(newValue []byte, targetID []byte) error {
	if newValue == nil || len(newValue) == 0 {
		return ErrNilParam
	}
	if targetID == nil || len(targetID) == 0 {
		return ErrNilParam
	}

	key := idx.IDs.Get(targetID)
	if key != nil {
		err := idx.IndexBucket.Delete(key)
		if err != nil {
			return err
		}

		err = idx.IDs.Remove(targetID)
		if err != nil {
			return err
		}

		key = key[:0]
	}

	key = append(key, newValue...)
	key = append(key, '_')
	key = append(key, '_')
	key = append(key, targetID...)

	err := idx.IDs.Add(targetID, key)
	if err != nil {
		return err
	}

	return idx.IndexBucket.Put(key, targetID)
}

// Remove a value from the list index
func (idx *ListIndex) Remove(value []byte) error {
	var err error
	var keys [][]byte

	c := idx.IndexBucket.Cursor()
	prefix := generatePrefix(value)

	for k, _ := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, _ = c.Next() {
		keys = append(keys, k)
	}

	for _, k := range keys {
		err = idx.IndexBucket.Delete(k)
		if err != nil {
			return err
		}
	}

	return idx.IDs.RemoveID(value)
}

// RemoveID removes an ID from the list index
func (idx *ListIndex) RemoveID(targetID []byte) error {
	value := idx.IDs.Get(targetID)
	if value == nil {
		return nil
	}

	err := idx.IndexBucket.Delete(value)
	if err != nil {
		return err
	}

	return idx.IDs.Remove(targetID)
}

// Get the first ID corresponding to the given value
func (idx *ListIndex) Get(value []byte) []byte {
	c := idx.IndexBucket.Cursor()
	prefix := generatePrefix(value)

	for k, id := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, id = c.Next() {
		return id
	}

	return nil
}

// All the IDs corresponding to the given value
func (idx *ListIndex) All(value []byte, opts *Options) ([][]byte, error) {
	var list [][]byte
	c := idx.IndexBucket.Cursor()
	cur := internal.Cursor{C: c, Reverse: opts != nil && opts.Reverse}

	prefix := generatePrefix(value)

	k, id := c.Seek(prefix)
	if cur.Reverse {
		var count int
		kc := k
		idc := id
		for ; kc != nil && bytes.HasPrefix(kc, prefix); kc, idc = c.Next() {
			count++
			k, id = kc, idc
		}
		if kc != nil {
			k, id = c.Prev()
		}
		list = make([][]byte, 0, count)
	}

	for ; bytes.HasPrefix(k, prefix); k, id = cur.Next() {
		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, id)
	}

	return list, nil
}

// AllRecords returns all the IDs of this index
func (idx *ListIndex) AllRecords(opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.Cursor{C: idx.IndexBucket.Cursor(), Reverse: opts != nil && opts.Reverse}

	for k, id := c.First(); k != nil; k, id = c.Next() {
		if id == nil || bytes.Equal(k, []byte("storm__ids")) {
			continue
		}

		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, id)
	}

	return list, nil
}

// Range returns the ids corresponding to the given range of values
func (idx *ListIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.RangeCursor{
		C:       idx.IndexBucket.Cursor(),
		Reverse: opts != nil && opts.Reverse,
		Min:     min,
		Max:     max,
		CompareFn: func(val, limit []byte) int {
			pos := bytes.LastIndex(val, []byte("__"))
			return bytes.Compare(val[:pos], limit)
		},
	}

	for k, id := c.First(); c.Continue(k); k, id = c.Next() {
		if id == nil || bytes.Equal(k, []byte("storm__ids")) {
			continue
		}

		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, id)
	}

	return list, nil
}

// Prefix returns the ids whose values have the given prefix.
func (idx *ListIndex) Prefix(prefix []byte, opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.PrefixCursor{
		C:       idx.IndexBucket.Cursor(),
		Reverse: opts != nil && opts.Reverse,
		Prefix:  prefix,
	}

	for k, id := c.First(); k != nil && c.Continue(k); k, id = c.Next() {
		if id == nil || bytes.Equal(k, []byte("storm__ids")) {
			continue
		}

		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, id)
	}
	return list, nil
}

func generatePrefix(value []byte) []byte {
	prefix := make([]byte, len(value)+2)
	var i int
	for i = range value {
		prefix[i] = value[i]
	}
	prefix[i+1] = '_'
	prefix[i+2] = '_'
	return prefix
}
15 vendor/github.com/asdine/storm/index/options.go generated vendored Normal file
@@ -0,0 +1,15 @@
package index

// NewOptions creates initialized Options
func NewOptions() *Options {
	return &Options{
		Limit: -1,
	}
}

// Options are used to customize queries
type Options struct {
	Limit   int
	Skip    int
	Reverse bool
}
183 vendor/github.com/asdine/storm/index/unique.go generated vendored Normal file
@@ -0,0 +1,183 @@
package index

import (
	"bytes"

	"github.com/asdine/storm/internal"
	"github.com/coreos/bbolt"
)

// NewUniqueIndex loads a UniqueIndex
func NewUniqueIndex(parent *bolt.Bucket, indexName []byte) (*UniqueIndex, error) {
	var err error
	b := parent.Bucket(indexName)
	if b == nil {
		if !parent.Writable() {
			return nil, ErrNotFound
		}
		b, err = parent.CreateBucket(indexName)
		if err != nil {
			return nil, err
		}
	}

	return &UniqueIndex{
		IndexBucket: b,
		Parent:      parent,
	}, nil
}

// UniqueIndex is an index that references unique values and the corresponding ID.
type UniqueIndex struct {
	Parent      *bolt.Bucket
	IndexBucket *bolt.Bucket
}

// Add a value to the unique index
func (idx *UniqueIndex) Add(value []byte, targetID []byte) error {
	if value == nil || len(value) == 0 {
		return ErrNilParam
	}
	if targetID == nil || len(targetID) == 0 {
		return ErrNilParam
	}

	exists := idx.IndexBucket.Get(value)
	if exists != nil {
		if bytes.Equal(exists, targetID) {
			return nil
		}
		return ErrAlreadyExists
	}

	return idx.IndexBucket.Put(value, targetID)
}

// Remove a value from the unique index
func (idx *UniqueIndex) Remove(value []byte) error {
	return idx.IndexBucket.Delete(value)
}

// RemoveID removes an ID from the unique index
func (idx *UniqueIndex) RemoveID(id []byte) error {
	c := idx.IndexBucket.Cursor()

	for val, ident := c.First(); val != nil; val, ident = c.Next() {
		if bytes.Equal(ident, id) {
			return idx.Remove(val)
		}
	}
	return nil
}

// Get the id corresponding to the given value
func (idx *UniqueIndex) Get(value []byte) []byte {
	return idx.IndexBucket.Get(value)
}

// All returns all the ids corresponding to the given value
func (idx *UniqueIndex) All(value []byte, opts *Options) ([][]byte, error) {
	id := idx.IndexBucket.Get(value)
	if id != nil {
		return [][]byte{id}, nil
	}

	return nil, nil
}

// AllRecords returns all the IDs of this index
func (idx *UniqueIndex) AllRecords(opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.Cursor{C: idx.IndexBucket.Cursor(), Reverse: opts != nil && opts.Reverse}

	for val, ident := c.First(); val != nil; val, ident = c.Next() {
		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, ident)
	}
	return list, nil
}

// Range returns the ids corresponding to the given range of values
func (idx *UniqueIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.RangeCursor{
		C:       idx.IndexBucket.Cursor(),
		Reverse: opts != nil && opts.Reverse,
		Min:     min,
		Max:     max,
		CompareFn: func(val, limit []byte) int {
			return bytes.Compare(val, limit)
		},
	}

	for val, ident := c.First(); val != nil && c.Continue(val); val, ident = c.Next() {
		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, ident)
	}
	return list, nil
}

// Prefix returns the ids whose values have the given prefix.
func (idx *UniqueIndex) Prefix(prefix []byte, opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.PrefixCursor{
		C:       idx.IndexBucket.Cursor(),
		Reverse: opts != nil && opts.Reverse,
		Prefix:  prefix,
	}

	for val, ident := c.First(); val != nil && c.Continue(val); val, ident = c.Next() {
		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, ident)
	}
	return list, nil
}

// first returns the first ID of this index
func (idx *UniqueIndex) first() []byte {
	c := idx.IndexBucket.Cursor()

	for val, ident := c.First(); val != nil; val, ident = c.Next() {
		return ident
	}
	return nil
}
112 vendor/github.com/asdine/storm/internal/boltdb.go generated vendored Normal file
@@ -0,0 +1,112 @@
package internal

import (
	"bytes"

	"github.com/coreos/bbolt"
)

// Cursor that can be reversed
type Cursor struct {
	C       *bolt.Cursor
	Reverse bool
}

// First element
func (c *Cursor) First() ([]byte, []byte) {
	if c.Reverse {
		return c.C.Last()
	}

	return c.C.First()
}

// Next element
func (c *Cursor) Next() ([]byte, []byte) {
	if c.Reverse {
		return c.C.Prev()
	}

	return c.C.Next()
}

// RangeCursor that can be reversed
type RangeCursor struct {
	C         *bolt.Cursor
	Reverse   bool
	Min       []byte
	Max       []byte
	CompareFn func([]byte, []byte) int
}

// First element
func (c *RangeCursor) First() ([]byte, []byte) {
	if c.Reverse {
		return c.C.Seek(c.Max)
	}

	return c.C.Seek(c.Min)
}

// Next element
func (c *RangeCursor) Next() ([]byte, []byte) {
	if c.Reverse {
		return c.C.Prev()
	}

	return c.C.Next()
}

// Continue tells if the loop needs to continue
func (c *RangeCursor) Continue(val []byte) bool {
	if c.Reverse {
		return val != nil && c.CompareFn(val, c.Min) >= 0
	}

	return val != nil && c.CompareFn(val, c.Max) <= 0
}

// PrefixCursor that can be reversed
type PrefixCursor struct {
	C       *bolt.Cursor
	Reverse bool
	Prefix  []byte
}

// First element
func (c *PrefixCursor) First() ([]byte, []byte) {
	var k, v []byte

	for k, v = c.C.First(); k != nil && !bytes.HasPrefix(k, c.Prefix); k, v = c.C.Next() {
	}

	if k == nil {
		return nil, nil
	}

	if c.Reverse {
		kc, vc := k, v
		for ; kc != nil && bytes.HasPrefix(kc, c.Prefix); kc, vc = c.C.Next() {
			k, v = kc, vc
		}
		if kc != nil {
			k, v = c.C.Prev()
		}
	}

	return k, v
}

// Next element
func (c *PrefixCursor) Next() ([]byte, []byte) {
	if c.Reverse {
		return c.C.Prev()
	}

	return c.C.Next()
}

// Continue tells if the loop needs to continue
func (c *PrefixCursor) Continue(val []byte) bool {
	return val != nil && bytes.HasPrefix(val, c.Prefix)
}
145 vendor/github.com/asdine/storm/kv.go generated vendored Normal file
@@ -0,0 +1,145 @@
package storm

import (
	"reflect"

	"github.com/coreos/bbolt"
)

// KeyValueStore can store and fetch values by key
type KeyValueStore interface {
	// Get a value from a bucket
	Get(bucketName string, key interface{}, to interface{}) error
	// Set a key/value pair into a bucket
	Set(bucketName string, key interface{}, value interface{}) error
	// Delete deletes a key from a bucket
	Delete(bucketName string, key interface{}) error
	// GetBytes gets a raw value from a bucket.
	GetBytes(bucketName string, key interface{}) ([]byte, error)
	// SetBytes sets a raw value into a bucket.
	SetBytes(bucketName string, key interface{}, value []byte) error
}

// GetBytes gets a raw value from a bucket.
func (n *node) GetBytes(bucketName string, key interface{}) ([]byte, error) {
	id, err := toBytes(key, n.codec)
	if err != nil {
		return nil, err
	}

	var val []byte
	return val, n.readTx(func(tx *bolt.Tx) error {
		raw, err := n.getBytes(tx, bucketName, id)
		if err != nil {
			return err
		}

		val = make([]byte, len(raw))
		copy(val, raw)
		return nil
	})
}

// getBytes gets a raw value from a bucket within an existing transaction.
func (n *node) getBytes(tx *bolt.Tx, bucketName string, id []byte) ([]byte, error) {
	bucket := n.GetBucket(tx, bucketName)
	if bucket == nil {
		return nil, ErrNotFound
	}

	raw := bucket.Get(id)
	if raw == nil {
		return nil, ErrNotFound
	}

	return raw, nil
}

// SetBytes sets a raw value into a bucket.
func (n *node) SetBytes(bucketName string, key interface{}, value []byte) error {
	if key == nil {
		return ErrNilParam
	}

	id, err := toBytes(key, n.codec)
	if err != nil {
		return err
	}

	return n.readWriteTx(func(tx *bolt.Tx) error {
		return n.setBytes(tx, bucketName, id, value)
	})
}

func (n *node) setBytes(tx *bolt.Tx, bucketName string, id, data []byte) error {
	bucket, err := n.CreateBucketIfNotExists(tx, bucketName)
	if err != nil {
		return err
	}

	// save node configuration in the bucket
	_, err = newMeta(bucket, n)
	if err != nil {
		return err
	}

	return bucket.Put(id, data)
}

// Get a value from a bucket
func (n *node) Get(bucketName string, key interface{}, to interface{}) error {
	ref := reflect.ValueOf(to)

	if !ref.IsValid() || ref.Kind() != reflect.Ptr {
		return ErrPtrNeeded
	}

	id, err := toBytes(key, n.codec)
	if err != nil {
		return err
	}

	return n.readTx(func(tx *bolt.Tx) error {
		raw, err := n.getBytes(tx, bucketName, id)
		if err != nil {
			return err
		}

		return n.codec.Unmarshal(raw, to)
	})
}

// Set a key/value pair into a bucket
func (n *node) Set(bucketName string, key interface{}, value interface{}) error {
	var data []byte
	var err error
	if value != nil {
		data, err = n.codec.Marshal(value)
		if err != nil {
			return err
		}
	}

	return n.SetBytes(bucketName, key, data)
}

// Delete deletes a key from a bucket
func (n *node) Delete(bucketName string, key interface{}) error {
	id, err := toBytes(key, n.codec)
	if err != nil {
		return err
	}

	return n.readWriteTx(func(tx *bolt.Tx) error {
		return n.delete(tx, bucketName, id)
	})
}

func (n *node) delete(tx *bolt.Tx, bucketName string, id []byte) error {
	bucket := n.GetBucket(tx, bucketName)
	if bucket == nil {
		return ErrNotFound
	}

	return bucket.Delete(id)
}
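This raw key/value API suits one-off settings better than struct storage; keys and values go through the node codec (JSON by default). A small sketch, assuming `import "github.com/asdine/storm"` and the db handle from the earlier example — the "config" bucket and "domain" key are arbitrary names, not anything this commit defines:

// setAndGetDomain exercises the raw key/value API; the bucket and key
// names here are hypothetical examples.
func setAndGetDomain(db *storm.DB) (string, error) {
	if err := db.Set("config", "domain", "example.com"); err != nil {
		return "", err
	}

	var domain string
	err := db.Get("config", "domain", &domain)
	return domain, err
}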
69 vendor/github.com/asdine/storm/metadata.go generated vendored Normal file
@@ -0,0 +1,69 @@
package storm

import (
	"reflect"

	"github.com/coreos/bbolt"
)

const (
	metaCodec = "codec"
)

func newMeta(b *bolt.Bucket, n Node) (*meta, error) {
	m := b.Bucket([]byte(metadataBucket))
	if m != nil {
		name := m.Get([]byte(metaCodec))
		if string(name) != n.Codec().Name() {
			return nil, ErrDifferentCodec
		}
		return &meta{
			node:   n,
			bucket: m,
		}, nil
	}

	m, err := b.CreateBucket([]byte(metadataBucket))
	if err != nil {
		return nil, err
	}

	m.Put([]byte(metaCodec), []byte(n.Codec().Name()))
	return &meta{
		node:   n,
		bucket: m,
	}, nil
}

type meta struct {
	node   Node
	bucket *bolt.Bucket
}

func (m *meta) increment(field *fieldConfig) error {
	var err error
	counter := field.IncrementStart

	raw := m.bucket.Get([]byte(field.Name + "counter"))
	if raw != nil {
		counter, err = numberfromb(raw)
		if err != nil {
			return err
		}
		counter++
	}

	raw, err = numbertob(counter)
	if err != nil {
		return err
	}

	err = m.bucket.Put([]byte(field.Name+"counter"), raw)
	if err != nil {
		return err
	}

	field.Value.Set(reflect.ValueOf(counter).Convert(field.Value.Type()))
	field.IsZero = false
	return nil
}
126 vendor/github.com/asdine/storm/node.go generated vendored Normal file
@@ -0,0 +1,126 @@
package storm

import (
	"github.com/asdine/storm/codec"
	"github.com/coreos/bbolt"
)

// A Node in Storm represents the API to a BoltDB bucket.
type Node interface {
	Tx
	TypeStore
	KeyValueStore
	BucketScanner

	// From returns a new Storm node with a new bucket root below the current.
	// All DB operations on the new node will be executed relative to this bucket.
	From(addend ...string) Node

	// Bucket returns the bucket name as a slice from the root.
	// In the normal, simple case this will be empty.
	Bucket() []string

	// GetBucket returns the given bucket below the current node.
	GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket

	// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
	// already exist.
	CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error)

	// WithTransaction returns a new Storm node that will use the given transaction.
	WithTransaction(tx *bolt.Tx) Node

	// Begin starts a new transaction.
	Begin(writable bool) (Node, error)

	// Codec used by this instance of Storm
	Codec() codec.MarshalUnmarshaler

	// WithCodec returns a new Storm Node that will use the given Codec.
	WithCodec(codec codec.MarshalUnmarshaler) Node

	// WithBatch returns a new Storm Node with the batch mode enabled.
	WithBatch(enabled bool) Node
}

// node is the default implementation of the Node interface.
type node struct {
	s *DB

	// The root bucket. In the normal, simple case this will be empty.
	rootBucket []string

	// Transaction object. Nil if not in transaction
	tx *bolt.Tx

	// Codec of this node
	codec codec.MarshalUnmarshaler

	// Enable batch mode for read-write transaction, instead of update mode
	batchMode bool
}

// From returns a new Storm Node with a new bucket root below the current.
// All DB operations on the new node will be executed relative to this bucket.
func (n node) From(addend ...string) Node {
	n.rootBucket = append(n.rootBucket, addend...)
	return &n
}

// WithTransaction returns a new Storm Node that will use the given transaction.
func (n node) WithTransaction(tx *bolt.Tx) Node {
	n.tx = tx
	return &n
}

// WithCodec returns a new Storm Node that will use the given Codec.
func (n node) WithCodec(codec codec.MarshalUnmarshaler) Node {
	n.codec = codec
	return &n
}

// WithBatch returns a new Storm Node with the batch mode enabled.
func (n node) WithBatch(enabled bool) Node {
	n.batchMode = enabled
	return &n
}

// Bucket returns the bucket name as a slice from the root.
// In the normal, simple case this will be empty.
func (n *node) Bucket() []string {
	return n.rootBucket
}

// Codec returns the EncodeDecoder used by this instance of Storm
func (n *node) Codec() codec.MarshalUnmarshaler {
	return n.codec
}

// Detects if already in transaction or runs a read write transaction.
// Uses batch mode if enabled.
func (n *node) readWriteTx(fn func(tx *bolt.Tx) error) error {
	if n.tx != nil {
		return fn(n.tx)
	}

	if n.batchMode {
		return n.s.Bolt.Batch(func(tx *bolt.Tx) error {
			return fn(tx)
		})
	}

	return n.s.Bolt.Update(func(tx *bolt.Tx) error {
		return fn(tx)
	})
}

// Detects if already in transaction or runs a read transaction.
func (n *node) readTx(fn func(tx *bolt.Tx) error) error {
	if n.tx != nil {
		return fn(n.tx)
	}

	return n.s.Bolt.View(func(tx *bolt.Tx) error {
		return fn(tx)
	})
}
97 vendor/github.com/asdine/storm/options.go generated vendored Normal file
@@ -0,0 +1,97 @@
package storm

import (
	"os"

	"github.com/asdine/storm/codec"
	"github.com/asdine/storm/index"
	"github.com/coreos/bbolt"
)

// BoltOptions used to pass options to BoltDB.
func BoltOptions(mode os.FileMode, options *bolt.Options) func(*Options) error {
	return func(opts *Options) error {
		opts.boltMode = mode
		opts.boltOptions = options
		return nil
	}
}

// Codec used to set a custom encoder and decoder. The default is JSON.
func Codec(c codec.MarshalUnmarshaler) func(*Options) error {
	return func(opts *Options) error {
		opts.codec = c
		return nil
	}
}

// Batch enables the use of batch instead of update for read-write transactions.
func Batch() func(*Options) error {
	return func(opts *Options) error {
		opts.batchMode = true
		return nil
	}
}

// Root used to set the root bucket. See also the From method.
func Root(root ...string) func(*Options) error {
	return func(opts *Options) error {
		opts.rootBucket = root
		return nil
	}
}

// UseDB allows Storm to use an existing open Bolt.DB.
// Warning: storm.DB.Close() will close the bolt.DB instance.
func UseDB(b *bolt.DB) func(*Options) error {
	return func(opts *Options) error {
		opts.path = b.Path()
		opts.bolt = b
		return nil
	}
}

// Limit sets the maximum number of records to return
func Limit(limit int) func(*index.Options) {
	return func(opts *index.Options) {
		opts.Limit = limit
	}
}

// Skip sets the number of records to skip
func Skip(offset int) func(*index.Options) {
	return func(opts *index.Options) {
		opts.Skip = offset
	}
}

// Reverse will return the results in descending order
func Reverse() func(*index.Options) {
	return func(opts *index.Options) {
		opts.Reverse = true
	}
}

// Options are used to customize the way Storm opens a database.
type Options struct {
	// Handles encoding and decoding of objects
	codec codec.MarshalUnmarshaler

	// Bolt file mode
	boltMode os.FileMode

	// Bolt options
	boltOptions *bolt.Options

	// Enable batch mode for read-write transaction, instead of update mode
	batchMode bool

	// The root bucket name
	rootBucket []string

	// Path of the database file
	path string

	// Bolt is still easily accessible
	bolt *bolt.DB
}
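These functional options compose at open time. A hedged sketch of a variant of the earlier Open call — the file path and bucket names are hypothetical examples, not anything this commit configures:

// Batch() makes read-write transactions use bolt's Batch instead of Update,
// trading latency for throughput; Root() scopes every operation under a
// nested bucket, much as From() does on an open node.
db, err := storm.Open("crockery.db",
	storm.Batch(),
	storm.Root("domains", "example.com"),
)
if err != nil {
	log.Fatal(err)
}
defer db.Close()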
107 vendor/github.com/asdine/storm/q/compare.go generated vendored Normal file
@@ -0,0 +1,107 @@
package q

import (
	"go/constant"
	"go/token"
	"reflect"
	"strconv"
)

func compare(a, b interface{}, tok token.Token) bool {
	vala := reflect.ValueOf(a)
	valb := reflect.ValueOf(b)

	ak := vala.Kind()
	bk := valb.Kind()
	switch {
	// comparing nil values
	case (ak == reflect.Ptr || ak == reflect.Slice || ak == reflect.Interface || ak == reflect.Invalid) &&
		(bk == reflect.Ptr || bk == reflect.Slice || bk == reflect.Interface || bk == reflect.Invalid) &&
		(!vala.IsValid() || vala.IsNil()) && (!valb.IsValid() || valb.IsNil()):
		return true
	case ak >= reflect.Int && ak <= reflect.Int64:
		if bk >= reflect.Int && bk <= reflect.Int64 {
			return constant.Compare(constant.MakeInt64(vala.Int()), tok, constant.MakeInt64(valb.Int()))
		}

		if bk >= reflect.Uint && bk <= reflect.Uint64 {
			return constant.Compare(constant.MakeInt64(vala.Int()), tok, constant.MakeInt64(int64(valb.Uint())))
		}

		if bk == reflect.Float32 || bk == reflect.Float64 {
			return constant.Compare(constant.MakeFloat64(float64(vala.Int())), tok, constant.MakeFloat64(valb.Float()))
		}

		if bk == reflect.String {
			bla, err := strconv.ParseFloat(valb.String(), 64)
			if err != nil {
				return false
			}

			return constant.Compare(constant.MakeFloat64(float64(vala.Int())), tok, constant.MakeFloat64(bla))
		}
	case ak >= reflect.Uint && ak <= reflect.Uint64:
		if bk >= reflect.Uint && bk <= reflect.Uint64 {
			return constant.Compare(constant.MakeUint64(vala.Uint()), tok, constant.MakeUint64(valb.Uint()))
		}

		if bk >= reflect.Int && bk <= reflect.Int64 {
			return constant.Compare(constant.MakeUint64(vala.Uint()), tok, constant.MakeUint64(uint64(valb.Int())))
		}

		if bk == reflect.Float32 || bk == reflect.Float64 {
			return constant.Compare(constant.MakeFloat64(float64(vala.Uint())), tok, constant.MakeFloat64(valb.Float()))
		}

		if bk == reflect.String {
			bla, err := strconv.ParseFloat(valb.String(), 64)
			if err != nil {
				return false
			}

			return constant.Compare(constant.MakeFloat64(float64(vala.Uint())), tok, constant.MakeFloat64(bla))
		}
	case ak == reflect.Float32 || ak == reflect.Float64:
		if bk == reflect.Float32 || bk == reflect.Float64 {
			return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(valb.Float()))
		}

		if bk >= reflect.Int && bk <= reflect.Int64 {
			return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(float64(valb.Int())))
		}

		if bk >= reflect.Uint && bk <= reflect.Uint64 {
			return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(float64(valb.Uint())))
		}

		if bk == reflect.String {
			bla, err := strconv.ParseFloat(valb.String(), 64)
			if err != nil {
				return false
			}

			return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(bla))
		}
	case ak == reflect.String:
		if bk == reflect.String {
			return constant.Compare(constant.MakeString(vala.String()), tok, constant.MakeString(valb.String()))
		}
	}

	if reflect.TypeOf(a).String() == "time.Time" && reflect.TypeOf(b).String() == "time.Time" {
		var x, y int64
		x = 1
		if vala.MethodByName("Equal").Call([]reflect.Value{valb})[0].Bool() {
			y = 1
		} else if vala.MethodByName("Before").Call([]reflect.Value{valb})[0].Bool() {
			y = 2
		}
		return constant.Compare(constant.MakeInt64(x), tok, constant.MakeInt64(y))
	}

	if tok == token.EQL {
		return reflect.DeepEqual(a, b)
	}

	return false
}
39 vendor/github.com/asdine/storm/q/fieldmatcher.go generated vendored Normal file
@@ -0,0 +1,39 @@
package q

import (
	"errors"
	"reflect"
)

// ErrUnknownField is returned when an unknown field is passed.
var ErrUnknownField = errors.New("unknown field")

type fieldMatcherDelegate struct {
	FieldMatcher
	Field string
}

// NewFieldMatcher creates a Matcher for a given field.
func NewFieldMatcher(field string, fm FieldMatcher) Matcher {
	return fieldMatcherDelegate{Field: field, FieldMatcher: fm}
}

// FieldMatcher can be used in NewFieldMatcher as a simple way to create the
// most common Matcher: A Matcher that evaluates one field's value.
// For more complex scenarios, implement the Matcher interface directly.
type FieldMatcher interface {
	MatchField(v interface{}) (bool, error)
}

func (r fieldMatcherDelegate) Match(i interface{}) (bool, error) {
	v := reflect.Indirect(reflect.ValueOf(i))
	return r.MatchValue(&v)
}

func (r fieldMatcherDelegate) MatchValue(v *reflect.Value) (bool, error) {
	field := v.FieldByName(r.Field)
	if !field.IsValid() {
		return false, ErrUnknownField
	}
	return r.MatchField(field.Interface())
}
51 vendor/github.com/asdine/storm/q/regexp.go generated vendored Normal file
@@ -0,0 +1,51 @@
package q

import (
	"fmt"
	"regexp"
	"sync"
)

// Re creates a regexp matcher. It checks if the given field matches the given regexp.
// Note that this only supports fields of type string or []byte.
func Re(field string, re string) Matcher {
	regexpCache.RLock()
	if r, ok := regexpCache.m[re]; ok {
		regexpCache.RUnlock()
		return NewFieldMatcher(field, &regexpMatcher{r: r})
	}
	regexpCache.RUnlock()

	regexpCache.Lock()
	r, err := regexp.Compile(re)
	if err == nil {
		regexpCache.m[re] = r
	}
	regexpCache.Unlock()

	return NewFieldMatcher(field, &regexpMatcher{r: r, err: err})
}

var regexpCache = struct {
	sync.RWMutex
	m map[string]*regexp.Regexp
}{m: make(map[string]*regexp.Regexp)}

type regexpMatcher struct {
	r   *regexp.Regexp
	err error
}

func (r *regexpMatcher) MatchField(v interface{}) (bool, error) {
	if r.err != nil {
		return false, r.err
	}
	switch fieldValue := v.(type) {
	case string:
		return r.r.MatchString(fieldValue), nil
	case []byte:
		return r.r.Match(fieldValue), nil
	default:
		return false, fmt.Errorf("Only string and []byte supported for regexp matcher, got %T", fieldValue)
	}
}
222 vendor/github.com/asdine/storm/q/tree.go generated vendored Normal file
@@ -0,0 +1,222 @@
// Package q contains a list of Matchers used to compare struct fields with values
package q

import (
	"go/token"
	"reflect"
)

// A Matcher is used to test against a record to see if it matches.
type Matcher interface {
	// Match is used to test the criteria against a structure.
	Match(interface{}) (bool, error)
}

// A ValueMatcher is used to test against a reflect.Value.
type ValueMatcher interface {
	// MatchValue tests if the given reflect.Value matches.
	// It is useful when the reflect.Value of an object already exists.
	MatchValue(*reflect.Value) (bool, error)
}

type cmp struct {
	value interface{}
	token token.Token
}

func (c *cmp) MatchField(v interface{}) (bool, error) {
	return compare(v, c.value, c.token), nil
}

type trueMatcher struct{}

func (*trueMatcher) Match(i interface{}) (bool, error) {
	return true, nil
}

func (*trueMatcher) MatchValue(v *reflect.Value) (bool, error) {
	return true, nil
}

type or struct {
	children []Matcher
}

func (c *or) Match(i interface{}) (bool, error) {
	v := reflect.Indirect(reflect.ValueOf(i))
	return c.MatchValue(&v)
}

func (c *or) MatchValue(v *reflect.Value) (bool, error) {
	for _, matcher := range c.children {
		if vm, ok := matcher.(ValueMatcher); ok {
			ok, err := vm.MatchValue(v)
			if err != nil {
				return false, err
			}
			if ok {
				return true, nil
			}
			continue
		}

		ok, err := matcher.Match(v.Interface())
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}

	return false, nil
}

type and struct {
	children []Matcher
}

func (c *and) Match(i interface{}) (bool, error) {
	v := reflect.Indirect(reflect.ValueOf(i))
	return c.MatchValue(&v)
}

func (c *and) MatchValue(v *reflect.Value) (bool, error) {
	for _, matcher := range c.children {
		if vm, ok := matcher.(ValueMatcher); ok {
			ok, err := vm.MatchValue(v)
			if err != nil {
				return false, err
			}
			if !ok {
				return false, nil
			}
			continue
		}

		ok, err := matcher.Match(v.Interface())
		if err != nil {
			return false, err
		}
		if !ok {
			return false, nil
		}
	}

	return true, nil
}

type strictEq struct {
	field string
	value interface{}
}

func (s *strictEq) MatchField(v interface{}) (bool, error) {
	return reflect.DeepEqual(v, s.value), nil
}

type in struct {
	list interface{}
}

func (i *in) MatchField(v interface{}) (bool, error) {
	ref := reflect.ValueOf(i.list)
	if ref.Kind() != reflect.Slice {
		return false, nil
	}

	c := cmp{
		token: token.EQL,
	}

	for i := 0; i < ref.Len(); i++ {
		c.value = ref.Index(i).Interface()
		ok, err := c.MatchField(v)
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}

	return false, nil
}

type not struct {
	children []Matcher
}

func (n *not) Match(i interface{}) (bool, error) {
	v := reflect.Indirect(reflect.ValueOf(i))
	return n.MatchValue(&v)
}

func (n *not) MatchValue(v *reflect.Value) (bool, error) {
	var err error

	for _, matcher := range n.children {
		vm, ok := matcher.(ValueMatcher)
		if ok {
			ok, err = vm.MatchValue(v)
		} else {
			ok, err = matcher.Match(v.Interface())
		}
		if err != nil {
			return false, err
		}
		if ok {
			return false, nil
		}
	}

	return true, nil
}

// Eq matcher, checks if the given field is equal to the given value
func Eq(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &cmp{value: v, token: token.EQL})
}

// StrictEq matcher, checks if the given field is deeply equal to the given value
func StrictEq(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &strictEq{value: v})
}

// Gt matcher, checks if the given field is greater than the given value
func Gt(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &cmp{value: v, token: token.GTR})
}

// Gte matcher, checks if the given field is greater than or equal to the given value
func Gte(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &cmp{value: v, token: token.GEQ})
}

// Lt matcher, checks if the given field is lesser than the given value
func Lt(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &cmp{value: v, token: token.LSS})
}

// Lte matcher, checks if the given field is lesser than or equal to the given value
func Lte(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &cmp{value: v, token: token.LEQ})
}

// In matcher, checks if the given field matches one of the values of the given slice.
// v must be a slice.
func In(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &in{list: v})
}

// True matcher, always returns true
func True() Matcher { return &trueMatcher{} }

// Or matcher, checks if at least one of the given matchers matches the record
func Or(matchers ...Matcher) Matcher { return &or{children: matchers} }

// And matcher, checks if all of the given matchers match the record
func And(matchers ...Matcher) Matcher { return &and{children: matchers} }

// Not matcher, checks if all of the given matchers return false
func Not(matchers ...Matcher) Matcher { return &not{children: matchers} }
219 vendor/github.com/asdine/storm/query.go generated vendored Normal file
@@ -0,0 +1,219 @@
package storm

import (
	"github.com/asdine/storm/internal"
	"github.com/asdine/storm/q"
	"github.com/coreos/bbolt"
)

// Select a list of records that match a list of matchers. Doesn't use indexes.
func (n *node) Select(matchers ...q.Matcher) Query {
	tree := q.And(matchers...)
	return newQuery(n, tree)
}

// Query is the low level query engine used by Storm. It allows searches to be
// run through an entire bucket.
type Query interface {
	// Skip matching records by the given number
	Skip(int) Query

	// Limit the results by the given number
	Limit(int) Query

	// Order by the given fields, in descending precedence, left-to-right.
	OrderBy(...string) Query

	// Reverse the order of the results
	Reverse() Query

	// Bucket specifies the bucket name
	Bucket(string) Query

	// Find a list of matching records
	Find(interface{}) error

	// First gets the first matching record
	First(interface{}) error

	// Delete all matching records
	Delete(interface{}) error

	// Count all the matching records
	Count(interface{}) (int, error)

	// Raw returns all the records without decoding them
	Raw() ([][]byte, error)

	// RawEach executes the given function for each raw element
	RawEach(func([]byte, []byte) error) error

	// Each executes the given function for each element
	Each(interface{}, func(interface{}) error) error
}

func newQuery(n *node, tree q.Matcher) *query {
	return &query{
		skip:  0,
		limit: -1,
		node:  n,
		tree:  tree,
	}
}

type query struct {
	limit   int
	skip    int
	reverse bool
	tree    q.Matcher
	node    *node
	bucket  string
	orderBy []string
}

func (q *query) Skip(nb int) Query {
	q.skip = nb
	return q
}

func (q *query) Limit(nb int) Query {
	q.limit = nb
	return q
}

func (q *query) OrderBy(field ...string) Query {
	q.orderBy = field
	return q
}

func (q *query) Reverse() Query {
	q.reverse = true
	return q
}

func (q *query) Bucket(bucketName string) Query {
	q.bucket = bucketName
	return q
}

func (q *query) Find(to interface{}) error {
	sink, err := newListSink(q.node, to)
	if err != nil {
		return err
	}

	return q.runQuery(sink)
}

func (q *query) First(to interface{}) error {
	sink, err := newFirstSink(q.node, to)
	if err != nil {
		return err
	}

	q.limit = 1
	return q.runQuery(sink)
}

func (q *query) Delete(kind interface{}) error {
	sink, err := newDeleteSink(q.node, kind)
	if err != nil {
		return err
	}

	return q.runQuery(sink)
}

func (q *query) Count(kind interface{}) (int, error) {
	sink, err := newCountSink(q.node, kind)
	if err != nil {
		return 0, err
	}

	err = q.runQuery(sink)
	if err != nil {
		return 0, err
	}

	return sink.counter, nil
}

func (q *query) Raw() ([][]byte, error) {
	sink := newRawSink()

	err := q.runQuery(sink)
	if err != nil {
		return nil, err
	}

	return sink.results, nil
}

func (q *query) RawEach(fn func([]byte, []byte) error) error {
	sink := newRawSink()

	sink.execFn = fn

	return q.runQuery(sink)
}

func (q *query) Each(kind interface{}, fn func(interface{}) error) error {
	sink, err := newEachSink(kind)
	if err != nil {
		return err
	}

	sink.execFn = fn

	return q.runQuery(sink)
}

func (q *query) runQuery(sink sink) error {
	if q.node.tx != nil {
		return q.query(q.node.tx, sink)
	}
	if sink.readOnly() {
		return q.node.s.Bolt.View(func(tx *bolt.Tx) error {
			return q.query(tx, sink)
		})
	}
	return q.node.s.Bolt.Update(func(tx *bolt.Tx) error {
		return q.query(tx, sink)
	})
}

func (q *query) query(tx *bolt.Tx, sink sink) error {
	bucketName := q.bucket
	if bucketName == "" {
		bucketName = sink.bucketName()
	}
	bucket := q.node.GetBucket(tx, bucketName)

	if q.limit == 0 {
		return sink.flush()
	}

	sorter := newSorter(q.node, sink)
	sorter.orderBy = q.orderBy
	sorter.reverse = q.reverse
	sorter.skip = q.skip
	sorter.limit = q.limit
	if bucket != nil {
		c := internal.Cursor{C: bucket.Cursor(), Reverse: q.reverse}
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if v == nil {
				continue
			}

			stop, err := sorter.filter(q.tree, bucket, k, v)
			if err != nil {
				return err
			}

			if stop {
				break
			}
		}
	}

	return sorter.flush()
}
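Select is the unindexed path: every record in the bucket is decoded and run through the matcher tree, with skip, limit and ordering applied by the sorter. An illustrative sketch, assuming imports of "github.com/asdine/storm" and "github.com/asdine/storm/q" plus the hypothetical Account type from earlier:

// findAccounts runs an unindexed query over the hypothetical Account bucket.
func findAccounts(db *storm.DB) ([]Account, error) {
	var matched []Account
	err := db.Select(
		q.And(
			q.Eq("Domain", "example.com"),
			q.Not(q.Eq("Email", "foo@example.com")),
		),
	).OrderBy("Email").Limit(10).Find(&matched)
	if err == storm.ErrNotFound {
		return nil, nil // an empty result is fine for our purposes
	}
	return matched, err
}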
99 vendor/github.com/asdine/storm/scan.go generated vendored Normal file
@@ -0,0 +1,99 @@
package storm

import (
	"bytes"

	"github.com/coreos/bbolt"
)

// A BucketScanner scans a Node for a list of buckets
type BucketScanner interface {
	// PrefixScan scans the root buckets for keys matching the given prefix.
	PrefixScan(prefix string) []Node
	// RangeScan scans the buckets in this node for keys within the given range.
	RangeScan(min, max string) []Node
}

// PrefixScan scans the buckets in this node for keys matching the given prefix.
func (n *node) PrefixScan(prefix string) []Node {
	if n.tx != nil {
		return n.prefixScan(n.tx, prefix)
	}

	var nodes []Node

	n.readTx(func(tx *bolt.Tx) error {
		nodes = n.prefixScan(tx, prefix)
		return nil
	})

	return nodes
}

func (n *node) prefixScan(tx *bolt.Tx, prefix string) []Node {
	var (
		prefixBytes = []byte(prefix)
		nodes       []Node
		c           = n.cursor(tx)
	)

	for k, v := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, v = c.Next() {
		if v != nil {
			continue
		}

		nodes = append(nodes, n.From(string(k)))
	}

	return nodes
}

// RangeScan scans the buckets in this node over a range such as a sortable time range.
func (n *node) RangeScan(min, max string) []Node {
	if n.tx != nil {
		return n.rangeScan(n.tx, min, max)
	}

	var nodes []Node

	n.readTx(func(tx *bolt.Tx) error {
		nodes = n.rangeScan(tx, min, max)
		return nil
	})

	return nodes
}

func (n *node) rangeScan(tx *bolt.Tx, min, max string) []Node {
	var (
		minBytes = []byte(min)
		maxBytes = []byte(max)
		nodes    []Node
		c        = n.cursor(tx)
	)

	for k, v := c.Seek(minBytes); k != nil && bytes.Compare(k, maxBytes) <= 0; k, v = c.Next() {
		if v != nil {
			continue
		}

		nodes = append(nodes, n.From(string(k)))
	}

	return nodes
}

func (n *node) cursor(tx *bolt.Tx) *bolt.Cursor {
	var c *bolt.Cursor

	if len(n.rootBucket) > 0 {
		c = n.GetBucket(tx).Cursor()
	} else {
		c = tx.Cursor()
	}

	return c
}
608 vendor/github.com/asdine/storm/sink.go generated vendored Normal file
@@ -0,0 +1,608 @@
package storm

import (
	"reflect"
	"sort"

	"github.com/asdine/storm/index"
	"github.com/asdine/storm/q"
	"github.com/coreos/bbolt"
)

type item struct {
	value  *reflect.Value
	bucket *bolt.Bucket
	k      []byte
	v      []byte
}

func newSorter(n Node, snk sink) *sorter {
	return &sorter{
		node:  n,
		sink:  snk,
		skip:  0,
		limit: -1,
		list:  make([]*item, 0),
		err:   make(chan error),
		done:  make(chan struct{}),
	}
}

type sorter struct {
	node    Node
	sink    sink
	list    []*item
	skip    int
	limit   int
	orderBy []string
	reverse bool
	err     chan error
	done    chan struct{}
}

func (s *sorter) filter(tree q.Matcher, bucket *bolt.Bucket, k, v []byte) (bool, error) {
	itm := &item{
		bucket: bucket,
		k:      k,
		v:      v,
	}
	rsink, ok := s.sink.(reflectSink)
	if !ok {
		return s.add(itm)
	}

	newElem := rsink.elem()
	if err := s.node.Codec().Unmarshal(v, newElem.Interface()); err != nil {
		return false, err
	}
	itm.value = &newElem

	if tree != nil {
		ok, err := tree.Match(newElem.Interface())
		if err != nil {
			return false, err
		}
		if !ok {
			return false, nil
		}
	}

	if len(s.orderBy) == 0 {
		return s.add(itm)
	}

	if _, ok := s.sink.(sliceSink); ok {
		// add directly to sink, we'll apply skip/limits after sorting
		return false, s.sink.add(itm)
	}

	s.list = append(s.list, itm)

	return false, nil
}

func (s *sorter) add(itm *item) (stop bool, err error) {
	if s.limit == 0 {
		return true, nil
	}

	if s.skip > 0 {
		s.skip--
		return false, nil
	}

	if s.limit > 0 {
		s.limit--
	}

	err = s.sink.add(itm)

	return s.limit == 0, err
}

func (s *sorter) compareValue(left reflect.Value, right reflect.Value) int {
	if !left.IsValid() || !right.IsValid() {
		if left.IsValid() {
			return 1
		}
		return -1
	}

	switch left.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		l, r := left.Int(), right.Int()
		if l < r {
			return -1
		}
		if l > r {
			return 1
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		l, r := left.Uint(), right.Uint()
		if l < r {
			return -1
		}
		if l > r {
			return 1
		}
	case reflect.Float32, reflect.Float64:
		l, r := left.Float(), right.Float()
		if l < r {
			return -1
		}
		if l > r {
			return 1
		}
	case reflect.String:
		l, r := left.String(), right.String()
		if l < r {
			return -1
		}
		if l > r {
			return 1
		}
	default:
		rawLeft, err := toBytes(left.Interface(), s.node.Codec())
		if err != nil {
			return -1
		}
		rawRight, err := toBytes(right.Interface(), s.node.Codec())
		if err != nil {
			return 1
		}

		l, r := string(rawLeft), string(rawRight)
		if l < r {
			return -1
		}
		if l > r {
			return 1
		}
	}

	return 0
}

func (s *sorter) less(leftElem reflect.Value, rightElem reflect.Value) bool {
	for _, orderBy := range s.orderBy {
		leftField := reflect.Indirect(leftElem).FieldByName(orderBy)
		if !leftField.IsValid() {
			s.err <- ErrNotFound
			return false
		}
		rightField := reflect.Indirect(rightElem).FieldByName(orderBy)
		if !rightField.IsValid() {
			s.err <- ErrNotFound
			return false
		}

		direction := 1
		if s.reverse {
			direction = -1
		}

		switch s.compareValue(leftField, rightField) * direction {
		case -1:
			return true
		case 1:
			return false
		default:
			continue
		}
	}

	return false
}

func (s *sorter) flush() error {
	if len(s.orderBy) == 0 {
		return s.sink.flush()
	}

	go func() {
		sort.Sort(s)
		close(s.err)
	}()
	err := <-s.err
	close(s.done)

	if err != nil {
		return err
	}

	if ssink, ok := s.sink.(sliceSink); ok {
		if !ssink.slice().IsValid() {
			return s.sink.flush()
		}
		if s.skip >= ssink.slice().Len() {
			ssink.reset()
			return s.sink.flush()
		}
		leftBound := s.skip
		if leftBound < 0 {
			leftBound = 0
		}
		limit := s.limit
		if s.limit < 0 {
			limit = 0
		}

		rightBound := leftBound + limit
		if rightBound > ssink.slice().Len() || rightBound == leftBound {
			rightBound = ssink.slice().Len()
		}
		ssink.setSlice(ssink.slice().Slice(leftBound, rightBound))
		return s.sink.flush()
	}

	for _, itm := range s.list {
		if itm == nil {
			break
		}
		stop, err := s.add(itm)
		if err != nil {
			return err
		}
		if stop {
			break
		}
	}

	return s.sink.flush()
}

func (s *sorter) Len() int {
	// skip if we encountered an earlier error
	select {
	case <-s.done:
		return 0
	default:
	}
	if ssink, ok := s.sink.(sliceSink); ok {
		return ssink.slice().Len()
	}
	return len(s.list)
}

func (s *sorter) Less(i, j int) bool {
	// skip if we encountered an earlier error
	select {
	case <-s.done:
		return false
	default:
	}

	if ssink, ok := s.sink.(sliceSink); ok {
		return s.less(ssink.slice().Index(i), ssink.slice().Index(j))
	}
	return s.less(*s.list[i].value, *s.list[j].value)
}

type sink interface {
	bucketName() string
	flush() error
	add(*item) error
	readOnly() bool
}

type reflectSink interface {
	elem() reflect.Value
}

type sliceSink interface {
	slice() reflect.Value
	setSlice(reflect.Value)
	reset()
}

func newListSink(node Node, to interface{}) (*listSink, error) {
	ref := reflect.ValueOf(to)

	if ref.Kind() != reflect.Ptr || reflect.Indirect(ref).Kind() != reflect.Slice {
		return nil, ErrSlicePtrNeeded
	}

	sliceType := reflect.Indirect(ref).Type()
	elemType := sliceType.Elem()

	if elemType.Kind() == reflect.Ptr {
		elemType = elemType.Elem()
	}

	if elemType.Name() == "" {
		return nil, ErrNoName
	}

	return &listSink{
		node:     node,
		ref:      ref,
		isPtr:    sliceType.Elem().Kind() == reflect.Ptr,
		elemType: elemType,
		name:     elemType.Name(),
		results:  reflect.MakeSlice(reflect.Indirect(ref).Type(), 0, 0),
	}, nil
}

type listSink struct {
	node     Node
	ref      reflect.Value
	results  reflect.Value
	elemType reflect.Type
	name     string
	isPtr    bool
	idx      int
}

func (l *listSink) slice() reflect.Value {
	return l.results
}

func (l *listSink) setSlice(s reflect.Value) {
	l.results = s
}

func (l *listSink) reset() {
	l.results = reflect.MakeSlice(reflect.Indirect(l.ref).Type(), 0, 0)
}

func (l *listSink) elem() reflect.Value {
	if l.results.IsValid() && l.idx < l.results.Len() {
		return l.results.Index(l.idx).Addr()
	}
	return reflect.New(l.elemType)
}

func (l *listSink) bucketName() string {
	return l.name
}

func (l *listSink) add(i *item) error {
	if l.idx == l.results.Len() {
		if l.isPtr {
			l.results = reflect.Append(l.results, *i.value)
		} else {
			l.results = reflect.Append(l.results, reflect.Indirect(*i.value))
		}
	}

	l.idx++

	return nil
}

func (l *listSink) flush() error {
	if l.results.IsValid() && l.results.Len() > 0 {
		reflect.Indirect(l.ref).Set(l.results)
		return nil
	}

	return ErrNotFound
}

func (l *listSink) readOnly() bool {
	return true
}

func newFirstSink(node Node, to interface{}) (*firstSink, error) {
	ref := reflect.ValueOf(to)

	if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
		return nil, ErrStructPtrNeeded
	}

	return &firstSink{
		node: node,
		ref:  ref,
	}, nil
}

type firstSink struct {
	node  Node
	ref   reflect.Value
	found bool
}

func (f *firstSink) elem() reflect.Value {
	return reflect.New(reflect.Indirect(f.ref).Type())
}

func (f *firstSink) bucketName() string {
	return reflect.Indirect(f.ref).Type().Name()
}

func (f *firstSink) add(i *item) error {
	reflect.Indirect(f.ref).Set(i.value.Elem())
	f.found = true
	return nil
}

func (f *firstSink) flush() error {
	if !f.found {
		return ErrNotFound
	}

	return nil
}

func (f *firstSink) readOnly() bool {
	return true
}

func newDeleteSink(node Node, kind interface{}) (*deleteSink, error) {
	ref := reflect.ValueOf(kind)

	if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
		return nil, ErrStructPtrNeeded
	}

	return &deleteSink{
		node: node,
		ref:  ref,
	}, nil
}

type deleteSink struct {
	node    Node
	ref     reflect.Value
	removed int
}

func (d *deleteSink) elem() reflect.Value {
	return reflect.New(reflect.Indirect(d.ref).Type())
}

func (d *deleteSink) bucketName() string {
	return reflect.Indirect(d.ref).Type().Name()
}

func (d *deleteSink) add(i *item) error {
	info, err := extract(&d.ref)
	if err != nil {
		return err
	}

	for fieldName, fieldCfg := range info.Fields {
		if fieldCfg.Index == "" {
			continue
		}
		idx, err := getIndex(i.bucket, fieldCfg.Index, fieldName)
		if err != nil {
			return err
		}

		err = idx.RemoveID(i.k)
		if err != nil {
			if err == index.ErrNotFound {
				return ErrNotFound
			}
			return err
		}
	}

	d.removed++
	return i.bucket.Delete(i.k)
}

func (d *deleteSink) flush() error {
	if d.removed == 0 {
		return ErrNotFound
	}

	return nil
}

func (d *deleteSink) readOnly() bool {
	return false
}

func newCountSink(node Node, kind interface{}) (*countSink, error) {
	ref := reflect.ValueOf(kind)

	if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
		return nil, ErrStructPtrNeeded
	}

	return &countSink{
		node: node,
		ref:  ref,
	}, nil
}

type countSink struct {
	node    Node
	ref     reflect.Value
	counter int
}

func (c *countSink) elem() reflect.Value {
	return reflect.New(reflect.Indirect(c.ref).Type())
}

func (c *countSink) bucketName() string {
	return reflect.Indirect(c.ref).Type().Name()
}

func (c *countSink) add(i *item) error {
	c.counter++
	return nil
}

func (c *countSink) flush() error {
	return nil
}

func (c *countSink) readOnly() bool {
	return true
}

func newRawSink() *rawSink {
	return &rawSink{}
}

type rawSink struct {
	results [][]byte
	execFn  func([]byte, []byte) error
}

func (r *rawSink) add(i *item) error {
	if r.execFn != nil {
		err := r.execFn(i.k, i.v)
		if err != nil {
			return err
		}
	} else {
		r.results = append(r.results, i.v)
	}

	return nil
}

func (r *rawSink) bucketName() string {
	return ""
}

func (r *rawSink) flush() error {
	return nil
}

func (r *rawSink) readOnly() bool {
	return true
}

func newEachSink(to interface{}) (*eachSink, error) {
	ref := reflect.ValueOf(to)

	if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
		return nil, ErrStructPtrNeeded
	}

	return &eachSink{
		ref: ref,
	}, nil
}

type eachSink struct {
	ref    reflect.Value
	execFn func(interface{}) error
}

func (e *eachSink) elem() reflect.Value {
	return reflect.New(reflect.Indirect(e.ref).Type())
}

func (e *eachSink) bucketName() string {
	return reflect.Indirect(e.ref).Type().Name()
}

func (e *eachSink) add(i *item) error {
	return e.execFn(i.value.Interface())
}

func (e *eachSink) flush() error {
	return nil
}

func (e *eachSink) readOnly() bool {
	return true
}
22
vendor/github.com/asdine/storm/sink_sorter_swap.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
// +build !go1.8

package storm

import "reflect"

func (s *sorter) Swap(i, j int) {
	// skip if we encountered an earlier error
	select {
	case <-s.done:
		return
	default:
	}

	if ssink, ok := s.sink.(sliceSink); ok {
		x, y := ssink.slice().Index(i).Interface(), ssink.slice().Index(j).Interface()
		ssink.slice().Index(i).Set(reflect.ValueOf(y))
		ssink.slice().Index(j).Set(reflect.ValueOf(x))
	} else {
		s.list[i], s.list[j] = s.list[j], s.list[i]
	}
}
20
vendor/github.com/asdine/storm/sink_sorter_swap_go1.8.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
// +build go1.8

package storm

import "reflect"

func (s *sorter) Swap(i, j int) {
	// skip if we encountered an earlier error
	select {
	case <-s.done:
		return
	default:
	}

	if ssink, ok := s.sink.(sliceSink); ok {
		reflect.Swapper(ssink.slice().Interface())(i, j)
	} else {
		s.list[i], s.list[j] = s.list[j], s.list[i]
	}
}
425
vendor/github.com/asdine/storm/store.go
generated
vendored
Normal file
@@ -0,0 +1,425 @@
package storm

import (
	"bytes"
	"reflect"

	"github.com/asdine/storm/index"
	"github.com/asdine/storm/q"
	"github.com/coreos/bbolt"
)

// TypeStore stores user defined types in BoltDB.
type TypeStore interface {
	Finder
	// Init creates the indexes and buckets for a given structure
	Init(data interface{}) error

	// ReIndex rebuilds all the indexes of a bucket
	ReIndex(data interface{}) error

	// Save a structure
	Save(data interface{}) error

	// Update a structure
	Update(data interface{}) error

	// UpdateField updates a single field
	UpdateField(data interface{}, fieldName string, value interface{}) error

	// Drop a bucket
	Drop(data interface{}) error

	// DeleteStruct deletes a structure from the associated bucket
	DeleteStruct(data interface{}) error
}

// Init creates the indexes and buckets for a given structure
func (n *node) Init(data interface{}) error {
	v := reflect.ValueOf(data)
	cfg, err := extract(&v)
	if err != nil {
		return err
	}

	return n.readWriteTx(func(tx *bolt.Tx) error {
		return n.init(tx, cfg)
	})
}

func (n *node) init(tx *bolt.Tx, cfg *structConfig) error {
	bucket, err := n.CreateBucketIfNotExists(tx, cfg.Name)
	if err != nil {
		return err
	}

	// save node configuration in the bucket
	_, err = newMeta(bucket, n)
	if err != nil {
		return err
	}

	for fieldName, fieldCfg := range cfg.Fields {
		if fieldCfg.Index == "" {
			continue
		}
		switch fieldCfg.Index {
		case tagUniqueIdx:
			_, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
		case tagIdx:
			_, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
		default:
			err = ErrIdxNotFound
		}

		if err != nil {
			return err
		}
	}

	return nil
}

func (n *node) ReIndex(data interface{}) error {
	ref := reflect.ValueOf(data)

	if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
		return ErrStructPtrNeeded
	}

	cfg, err := extract(&ref)
	if err != nil {
		return err
	}

	return n.readWriteTx(func(tx *bolt.Tx) error {
		return n.reIndex(tx, data, cfg)
	})
}

func (n *node) reIndex(tx *bolt.Tx, data interface{}, cfg *structConfig) error {
	root := n.WithTransaction(tx)
	nodes := root.From(cfg.Name).PrefixScan(indexPrefix)
	bucket := root.GetBucket(tx, cfg.Name)
	if bucket == nil {
		return ErrNotFound
	}

	for _, node := range nodes {
		buckets := node.Bucket()
		name := buckets[len(buckets)-1]
		err := bucket.DeleteBucket([]byte(name))
		if err != nil {
			return err
		}
	}

	total, err := root.Count(data)
	if err != nil {
		return err
	}

	for i := 0; i < total; i++ {
		err = root.Select(q.True()).Skip(i).First(data)
		if err != nil {
			return err
		}

		err = root.Update(data)
		if err != nil {
			return err
		}
	}

	return nil
}

// Save a structure
func (n *node) Save(data interface{}) error {
	ref := reflect.ValueOf(data)

	if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
		return ErrStructPtrNeeded
	}

	cfg, err := extract(&ref)
	if err != nil {
		return err
	}

	if cfg.ID.IsZero {
		if !cfg.ID.IsInteger || !cfg.ID.Increment {
			return ErrZeroID
		}
	}

	return n.readWriteTx(func(tx *bolt.Tx) error {
		return n.save(tx, cfg, data, false)
	})
}

func (n *node) save(tx *bolt.Tx, cfg *structConfig, data interface{}, update bool) error {
	bucket, err := n.CreateBucketIfNotExists(tx, cfg.Name)
	if err != nil {
		return err
	}

	// save node configuration in the bucket
	meta, err := newMeta(bucket, n)
	if err != nil {
		return err
	}

	if cfg.ID.IsZero {
		err = meta.increment(cfg.ID)
		if err != nil {
			return err
		}
	}

	id, err := toBytes(cfg.ID.Value.Interface(), n.codec)
	if err != nil {
		return err
	}

	for fieldName, fieldCfg := range cfg.Fields {
		if !update && !fieldCfg.IsID && fieldCfg.Increment && fieldCfg.IsInteger && fieldCfg.IsZero {
			err = meta.increment(fieldCfg)
			if err != nil {
				return err
			}
		}

		if fieldCfg.Index == "" {
			continue
		}

		idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
		if err != nil {
			return err
		}

		if update && fieldCfg.IsZero && !fieldCfg.ForceUpdate {
			continue
		}

		if fieldCfg.IsZero {
			err = idx.RemoveID(id)
			if err != nil {
				return err
			}
			continue
		}

		value, err := toBytes(fieldCfg.Value.Interface(), n.codec)
		if err != nil {
			return err
		}

		var found bool
		idsSaved, err := idx.All(value, nil)
		if err != nil {
			return err
		}
		for _, idSaved := range idsSaved {
			if bytes.Compare(idSaved, id) == 0 {
				found = true
				break
			}
		}

		if found {
			continue
		}

		err = idx.RemoveID(id)
		if err != nil {
			return err
		}

		err = idx.Add(value, id)
		if err != nil {
			if err == index.ErrAlreadyExists {
				return ErrAlreadyExists
			}
			return err
		}
	}

	raw, err := n.codec.Marshal(data)
	if err != nil {
		return err
	}

	return bucket.Put(id, raw)
}

// Update a structure
func (n *node) Update(data interface{}) error {
	return n.update(data, func(ref *reflect.Value, current *reflect.Value, cfg *structConfig) error {
		numfield := ref.NumField()
		for i := 0; i < numfield; i++ {
			f := ref.Field(i)
			if ref.Type().Field(i).PkgPath != "" {
				continue
			}
			zero := reflect.Zero(f.Type()).Interface()
			actual := f.Interface()
			if !reflect.DeepEqual(actual, zero) {
				cf := current.Field(i)
				cf.Set(f)
				idxInfo, ok := cfg.Fields[ref.Type().Field(i).Name]
				if ok {
					idxInfo.Value = &cf
				}
			}
		}
		return nil
	})
}

// UpdateField updates a single field
func (n *node) UpdateField(data interface{}, fieldName string, value interface{}) error {
	return n.update(data, func(ref *reflect.Value, current *reflect.Value, cfg *structConfig) error {
		f := current.FieldByName(fieldName)
		if !f.IsValid() {
			return ErrNotFound
		}
		tf, _ := current.Type().FieldByName(fieldName)
		if tf.PkgPath != "" {
			return ErrNotFound
		}
		v := reflect.ValueOf(value)
		if v.Kind() != f.Kind() {
			return ErrIncompatibleValue
		}
		f.Set(v)
		idxInfo, ok := cfg.Fields[fieldName]
		if ok {
			idxInfo.Value = &f
			idxInfo.IsZero = isZero(idxInfo.Value)
			idxInfo.ForceUpdate = true
		}
		return nil
	})
}

func (n *node) update(data interface{}, fn func(*reflect.Value, *reflect.Value, *structConfig) error) error {
	ref := reflect.ValueOf(data)
	if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
		return ErrStructPtrNeeded
	}

	cfg, err := extract(&ref)
	if err != nil {
		return err
	}

	if cfg.ID.IsZero {
		return ErrNoID
	}

	current := reflect.New(reflect.Indirect(ref).Type())

	return n.readWriteTx(func(tx *bolt.Tx) error {
		err = n.WithTransaction(tx).One(cfg.ID.Name, cfg.ID.Value.Interface(), current.Interface())
		if err != nil {
			return err
		}

		ref := reflect.ValueOf(data).Elem()
		cref := current.Elem()
		err = fn(&ref, &cref, cfg)
		if err != nil {
			return err
		}

		return n.save(tx, cfg, current.Interface(), true)
	})
}

// Drop a bucket
func (n *node) Drop(data interface{}) error {
	var bucketName string

	v := reflect.ValueOf(data)
	if v.Kind() != reflect.String {
		info, err := extract(&v)
		if err != nil {
			return err
		}

		bucketName = info.Name
	} else {
		bucketName = v.Interface().(string)
	}

	return n.readWriteTx(func(tx *bolt.Tx) error {
		return n.drop(tx, bucketName)
	})
}

func (n *node) drop(tx *bolt.Tx, bucketName string) error {
	bucket := n.GetBucket(tx)
	if bucket == nil {
		return tx.DeleteBucket([]byte(bucketName))
	}

	return bucket.DeleteBucket([]byte(bucketName))
}

// DeleteStruct deletes a structure from the associated bucket
func (n *node) DeleteStruct(data interface{}) error {
	ref := reflect.ValueOf(data)

	if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
		return ErrStructPtrNeeded
	}

	cfg, err := extract(&ref)
	if err != nil {
		return err
	}

	id, err := toBytes(cfg.ID.Value.Interface(), n.codec)
	if err != nil {
		return err
	}

	return n.readWriteTx(func(tx *bolt.Tx) error {
		return n.deleteStruct(tx, cfg, id)
	})
}

func (n *node) deleteStruct(tx *bolt.Tx, cfg *structConfig, id []byte) error {
	bucket := n.GetBucket(tx, cfg.Name)
	if bucket == nil {
		return ErrNotFound
	}

	for fieldName, fieldCfg := range cfg.Fields {
		if fieldCfg.Index == "" {
			continue
		}

		idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
		if err != nil {
			return err
		}

		err = idx.RemoveID(id)
		if err != nil {
			if err == index.ErrNotFound {
				return ErrNotFound
			}
			return err
		}
	}

	raw := bucket.Get(id)
	if raw == nil {
		return ErrNotFound
	}

	return bucket.Delete(id)
}
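Since `Save` and `Update` above carry all of the index bookkeeping, a minimal usage sketch may help; the `User` type, its tags, and the email values are hypothetical, not part of this commit:

```go
package main

import "github.com/asdine/storm"

// User is an illustrative storm model; the tags drive the index logic in save().
type User struct {
	ID    int    `storm:"id,increment"` // a zero ID triggers meta.increment above
	Email string `storm:"unique"`       // maintained through a unique index
}

func saveAndRename(db *storm.DB) error {
	u := User{Email: "a@example.com"}
	if err := db.Save(&u); err != nil { // assigns u.ID and writes the index entry
		return err
	}
	// UpdateField marks the field ForceUpdate, so its index entry is rewritten.
	return db.UpdateField(&u, "Email", "b@example.com")
}
```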
142
vendor/github.com/asdine/storm/storm.go
generated
vendored
Normal file
@@ -0,0 +1,142 @@
package storm

import (
	"bytes"
	"encoding/binary"
	"time"

	"github.com/asdine/storm/codec"
	"github.com/asdine/storm/codec/json"
	"github.com/coreos/bbolt"
)

const (
	dbinfo         = "__storm_db"
	metadataBucket = "__storm_metadata"
)

// Defaults to json
var defaultCodec = json.Codec

// Open opens a database at the given path with optional Storm options.
func Open(path string, stormOptions ...func(*Options) error) (*DB, error) {
	var err error

	var opts Options
	for _, option := range stormOptions {
		if err = option(&opts); err != nil {
			return nil, err
		}
	}

	s := DB{
		Bolt: opts.bolt,
	}

	n := node{
		s:          &s,
		codec:      opts.codec,
		batchMode:  opts.batchMode,
		rootBucket: opts.rootBucket,
	}

	if n.codec == nil {
		n.codec = defaultCodec
	}

	if opts.boltMode == 0 {
		opts.boltMode = 0600
	}

	if opts.boltOptions == nil {
		opts.boltOptions = &bolt.Options{Timeout: 1 * time.Second}
	}

	s.Node = &n

	// skip if UseDB option is used
	if s.Bolt == nil {
		s.Bolt, err = bolt.Open(path, opts.boltMode, opts.boltOptions)
		if err != nil {
			return nil, err
		}
	}

	err = s.checkVersion()
	if err != nil {
		return nil, err
	}

	return &s, nil
}

// DB is the wrapper around BoltDB. It contains an instance of BoltDB and uses it to perform all the
// needed operations
type DB struct {
	// The root node that points to the root bucket.
	Node

	// Bolt is still easily accessible
	Bolt *bolt.DB
}

// Close the database
func (s *DB) Close() error {
	return s.Bolt.Close()
}

func (s *DB) checkVersion() error {
	var v string
	err := s.Get(dbinfo, "version", &v)
	if err != nil && err != ErrNotFound {
		return err
	}

	// for now, we only set the current version if it doesn't exist.
	// v1 and v2 database files are compatible.
	if v == "" {
		return s.Set(dbinfo, "version", Version)
	}

	return nil
}

// toBytes turns an interface into a slice of bytes
func toBytes(key interface{}, codec codec.MarshalUnmarshaler) ([]byte, error) {
	if key == nil {
		return nil, nil
	}
	switch t := key.(type) {
	case []byte:
		return t, nil
	case string:
		return []byte(t), nil
	case int:
		return numbertob(int64(t))
	case uint:
		return numbertob(uint64(t))
	case int8, int16, int32, int64, uint8, uint16, uint32, uint64:
		return numbertob(t)
	default:
		return codec.Marshal(key)
	}
}

func numbertob(v interface{}) ([]byte, error) {
	var buf bytes.Buffer
	err := binary.Write(&buf, binary.BigEndian, v)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func numberfromb(raw []byte) (int64, error) {
	r := bytes.NewReader(raw)
	var to int64
	err := binary.Read(r, binary.BigEndian, &to)
	if err != nil {
		return 0, err
	}
	return to, nil
}
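`Open` is the only entry point a consumer needs; a minimal sketch, with an illustrative file name:

```go
package main

import (
	"log"

	"github.com/asdine/storm"
)

func main() {
	// With no options this uses the defaults set in Open above:
	// the JSON codec, 0600 file mode, and a 1s bolt lock timeout.
	db, err := storm.Open("crockery.db") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```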
52
vendor/github.com/asdine/storm/transaction.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
package storm

import "github.com/coreos/bbolt"

// Tx is a transaction.
type Tx interface {
	// Commit writes all changes to disk.
	Commit() error

	// Rollback closes the transaction and ignores all previous updates.
	Rollback() error
}

// Begin starts a new transaction.
func (n node) Begin(writable bool) (Node, error) {
	var err error

	n.tx, err = n.s.Bolt.Begin(writable)
	if err != nil {
		return nil, err
	}

	return &n, nil
}

// Rollback closes the transaction and ignores all previous updates.
func (n *node) Rollback() error {
	if n.tx == nil {
		return ErrNotInTransaction
	}

	err := n.tx.Rollback()
	if err == bolt.ErrTxClosed {
		return ErrNotInTransaction
	}

	return err
}

// Commit writes all changes to disk.
func (n *node) Commit() error {
	if n.tx == nil {
		return ErrNotInTransaction
	}

	err := n.tx.Commit()
	if err == bolt.ErrTxClosed {
		return ErrNotInTransaction
	}

	return err
}
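Because `Begin` returns a `Node` bound to the new transaction, manual transaction management composes with the rest of the API. A hedged sketch; `saveInTx` is an illustrative helper, not part of this commit:

```go
package main

import "github.com/asdine/storm"

func saveInTx(db *storm.DB, data interface{}) error {
	tx, err := db.Begin(true) // writable; tx satisfies storm.Node
	if err != nil {
		return err
	}
	// After a successful Commit this returns ErrNotInTransaction,
	// which a deferred call safely ignores.
	defer tx.Rollback()

	if err := tx.Save(data); err != nil {
		return err
	}
	return tx.Commit()
}
```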
4
vendor/github.com/asdine/storm/version.go
generated
vendored
Normal file
@@ -0,0 +1,4 @@
package storm

// Version of Storm
const Version = "2.0.0"
4
vendor/github.com/coreos/bbolt/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,4 @@
*.prof
*.test
*.swp
/bin/
20
vendor/github.com/coreos/bbolt/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013 Ben Johnson

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
18
vendor/github.com/coreos/bbolt/Makefile
generated
vendored
Normal file
@@ -0,0 +1,18 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"

default: build

race:
	@go test -v -race -test.run="TestSimulate_(100op|1000op)"

# go get github.com/kisielk/errcheck
errcheck:
	@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt

test:
	@go test -v -cover .
	@go test -v ./cmd/bolt

.PHONY: fmt test
852
vendor/github.com/coreos/bbolt/README.md
generated
vendored
Normal file
@@ -0,0 +1,852 @@
Bolt [](https://coveralls.io/r/boltdb/bolt?branch=master) [](https://godoc.org/github.com/boltdb/bolt)
====

Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
fast, and reliable database for projects that don't require a full database
server such as Postgres or MySQL.

Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.

[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/

## Project Status

Bolt is stable and the API is fixed. Full unit test coverage and randomized
black box testing are used to ensure database consistency and thread safety.
Bolt is currently in high-load production environments serving databases as
large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
services every day.

## Table of Contents

- [Getting Started](#getting-started)
  - [Installing](#installing)
  - [Opening a database](#opening-a-database)
  - [Transactions](#transactions)
    - [Read-write transactions](#read-write-transactions)
    - [Read-only transactions](#read-only-transactions)
    - [Batch read-write transactions](#batch-read-write-transactions)
    - [Managing transactions manually](#managing-transactions-manually)
  - [Using buckets](#using-buckets)
  - [Using key/value pairs](#using-keyvalue-pairs)
  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
  - [Iterating over keys](#iterating-over-keys)
    - [Prefix scans](#prefix-scans)
    - [Range scans](#range-scans)
    - [ForEach()](#foreach)
  - [Nested buckets](#nested-buckets)
  - [Database backups](#database-backups)
  - [Statistics](#statistics)
  - [Read-Only Mode](#read-only-mode)
  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
  - [LevelDB, RocksDB](#leveldb-rocksdb)
  - [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)

## Getting Started

### Installing

To start using Bolt, install Go and run `go get`:

```sh
$ go get github.com/boltdb/bolt/...
```

This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.

### Opening a database

The top-level object in Bolt is a `DB`. It is represented as a single file on
your disk and represents a consistent snapshot of your data.

To open your database, simply use the `bolt.Open()` function:

```go
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open the my.db data file in your current directory.
	// It will be created if it doesn't exist.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	...
}
```

Please note that Bolt obtains a file lock on the data file so multiple processes
cannot open the same database at the same time. Opening an already open Bolt
database will cause it to hang until the other process closes it. To prevent
an indefinite wait you can pass a timeout option to the `Open()` function:

```go
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
```

### Transactions

Bolt allows only one read-write transaction at a time but allows as many
read-only transactions as you want at a time. Each transaction has a consistent
view of the data as it existed when the transaction started.

Individual transactions and all objects created from them (e.g. buckets, keys)
are not thread safe. To work with data in multiple goroutines you must start
a transaction for each one or use locking to ensure only one goroutine accesses
a transaction at a time. Creating a transaction from the `DB` is thread safe.

Read-only transactions and read-write transactions should not depend on one
another and generally shouldn't be opened simultaneously in the same goroutine.
This can cause a deadlock as the read-write transaction needs to periodically
re-map the data file but it cannot do so while a read-only transaction is open.

#### Read-write transactions

To start a read-write transaction, you can use the `DB.Update()` function:

```go
err := db.Update(func(tx *bolt.Tx) error {
	...
	return nil
})
```

Inside the closure, you have a consistent view of the database. You commit the
transaction by returning `nil` at the end. You can also rollback the transaction
at any point by returning an error. All database operations are allowed inside
a read-write transaction.

Always check the return error as it will report any disk failures that can cause
your transaction to not complete. If you return an error within your closure
it will be passed through.

#### Read-only transactions

To start a read-only transaction, you can use the `DB.View()` function:

```go
err := db.View(func(tx *bolt.Tx) error {
	...
	return nil
})
```

You also get a consistent view of the database within this closure, however,
no mutating operations are allowed within a read-only transaction. You can only
retrieve buckets, retrieve values, and copy the database within a read-only
transaction.

#### Batch read-write transactions

Each `DB.Update()` waits for disk to commit the writes. This overhead
can be minimized by combining multiple updates with the `DB.Batch()`
function:

```go
err := db.Batch(func(tx *bolt.Tx) error {
	...
	return nil
})
```

Concurrent Batch calls are opportunistically combined into larger
transactions. Batch is only useful when there are multiple goroutines
calling it.

The trade-off is that `Batch` can call the given
function multiple times, if parts of the transaction fail. The
function must be idempotent and side effects must take effect only
after a successful return from `DB.Batch()`.

For example: don't display messages from inside the function, instead
set variables in the enclosing scope:

```go
var id uint64
err := db.Batch(func(tx *bolt.Tx) error {
	// Find last key in bucket, decode as bigendian uint64, increment
	// by one, encode back to []byte, and add new key.
	...
	id = newValue
	return nil
})
if err != nil {
	return ...
}
fmt.Printf("Allocated ID %d\n", id)
```

#### Managing transactions manually

The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
function. These helper functions will start the transaction, execute a function,
and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions.

However, sometimes you may want to manually start and end your transactions.
You can use the `DB.Begin()` function directly but **please** be sure to close
the transaction.

```go
// Start a writable transaction.
tx, err := db.Begin(true)
if err != nil {
	return err
}
defer tx.Rollback()

// Use the transaction...
_, err = tx.CreateBucket([]byte("MyBucket"))
if err != nil {
	return err
}

// Commit the transaction and check for error.
if err := tx.Commit(); err != nil {
	return err
}
```

The first argument to `DB.Begin()` is a boolean stating if the transaction
should be writable.

### Using buckets

Buckets are collections of key/value pairs within the database. All keys in a
bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
function:

```go
db.Update(func(tx *bolt.Tx) error {
	_, err := tx.CreateBucket([]byte("MyBucket"))
	if err != nil {
		return fmt.Errorf("create bucket: %s", err)
	}
	return nil
})
```

You can also create a bucket only if it doesn't exist by using the
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
function for all your top-level buckets after you open your database so you can
guarantee that they exist for future transactions.

To delete a bucket, simply call the `Tx.DeleteBucket()` function.
### Using key/value pairs

To save a key/value pair to a bucket, use the `Bucket.Put()` function:

```go
db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	err := b.Put([]byte("answer"), []byte("42"))
	return err
})
```

This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
bucket. To retrieve this value, we can use the `Bucket.Get()` function:

```go
db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	v := b.Get([]byte("answer"))
	fmt.Printf("The answer is: %s\n", v)
	return nil
})
```

The `Get()` function does not return an error because its operation is
guaranteed to work (unless there is some kind of system failure). If the key
exists then it will return its byte slice value. If it doesn't exist then it
will return `nil`. It's important to note that you can have a zero-length value
set to a key, which is different from the key not existing.

Use the `Bucket.Delete()` function to delete a key from the bucket.

Please note that values returned from `Get()` are only valid while the
transaction is open. If you need to use a value outside of the transaction
then you must use `copy()` to copy it to another byte slice.
### Autoincrementing integer for the bucket
By using the `NextSequence()` function, you can let Bolt determine a sequence
which can be used as the unique identifier for your key/value pairs. See the
example below.

```go
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
func (s *Store) CreateUser(u *User) error {
	return s.db.Update(func(tx *bolt.Tx) error {
		// Retrieve the users bucket.
		// This should be created when the DB is first opened.
		b := tx.Bucket([]byte("users"))

		// Generate ID for the user.
		// This returns an error only if the Tx is closed or not writeable.
		// That can't happen in an Update() call so I ignore the error check.
		id, _ := b.NextSequence()
		u.ID = int(id)

		// Marshal user data into bytes.
		buf, err := json.Marshal(u)
		if err != nil {
			return err
		}

		// Persist bytes to users bucket.
		return b.Put(itob(u.ID), buf)
	})
}

// itob returns an 8-byte big endian representation of v.
func itob(v int) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(v))
	return b
}

type User struct {
	ID int
	...
}
```

### Iterating over keys

Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
iteration over these keys extremely fast. To iterate over keys we'll use a
`Cursor`:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	b := tx.Bucket([]byte("MyBucket"))

	c := b.Cursor()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```

The cursor allows you to move to a specific point in the list of keys and move
forward or backward through the keys one at a time.

The following functions are available on the cursor:

```
First()  Move to the first key.
Last()   Move to the last key.
Seek()   Move to a specific key.
Next()   Move to the next key.
Prev()   Move to the previous key.
```

Each of those functions has a return signature of `(key []byte, value []byte)`.
When you have iterated to the end of the cursor then `Next()` will return a
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
before calling `Next()` or `Prev()`. If you do not seek to a position then
these functions will return a `nil` key.

During iteration, if the key is non-`nil` but the value is `nil`, that means
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
access the sub-bucket.

#### Prefix scans

To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	prefix := []byte("1234")
	for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```

#### Range scans

Another common use case is scanning over a range such as a time range. If you
use a sortable time encoding such as RFC3339 then you can query a specific
date range like this:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume our events bucket exists and has RFC3339 encoded time keys.
	c := tx.Bucket([]byte("Events")).Cursor()

	// Our time range spans the 90's decade.
	min := []byte("1990-01-01T00:00:00Z")
	max := []byte("2000-01-01T00:00:00Z")

	// Iterate over the 90's.
	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
		fmt.Printf("%s: %s\n", k, v)
	}

	return nil
})
```

Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
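In practice that means formatting keys with the fixed-width `time.RFC3339` layout; a small sketch (the helper name is illustrative):

```go
// eventKey returns a byte-sortable key for t. RFC3339 fields are fixed width,
// while RFC3339Nano trims trailing zeros and so breaks byte ordering.
func eventKey(t time.Time) []byte {
	return []byte(t.UTC().Format(time.RFC3339))
}
```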
#### ForEach()

You can also use the function `ForEach()` if you know you'll be iterating over
all the keys in a bucket:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	b := tx.Bucket([]byte("MyBucket"))

	b.ForEach(func(k, v []byte) error {
		fmt.Printf("key=%s, value=%s\n", k, v)
		return nil
	})
	return nil
})
```

### Nested buckets

You can also store a bucket in a key to create nested buckets. The API is the
same as the bucket management API on the `DB` object:

```go
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```
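A minimal sketch of creating one (names are illustrative):

```go
err := db.Update(func(tx *bolt.Tx) error {
	parent, err := tx.CreateBucketIfNotExists([]byte("Parent"))
	if err != nil {
		return err
	}
	// The nested bucket lives under "Parent" rather than at the top level.
	_, err = parent.CreateBucketIfNotExists([]byte("Child"))
	return err
})
```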
|
||||
|
||||
|
||||
### Database backups
|
||||
|
||||
Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
|
||||
function to write a consistent view of the database to a writer. If you call
|
||||
this from a read-only transaction, it will perform a hot backup and not block
|
||||
your other database reads and writes.
|
||||
|
||||
By default, it will use a regular file handle which will utilize the operating
|
||||
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
|
||||
documentation for information about optimizing for larger-than-RAM datasets.
|
||||
|
||||
One common use case is to backup over HTTP so you can use tools like `cURL` to
|
||||
do database backups:
|
||||
|
||||
```go
|
||||
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
|
||||
err := db.View(func(tx *bolt.Tx) error {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
|
||||
w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
|
||||
_, err := tx.WriteTo(w)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Then you can backup using this command:
|
||||
|
||||
```sh
|
||||
$ curl http://localhost/backup > my.db
|
||||
```
|
||||
|
||||
Or you can open your browser to `http://localhost/backup` and it will download
|
||||
automatically.
|
||||
|
||||
If you want to backup to another file you can use the `Tx.CopyFile()` helper
|
||||
function.
|
||||
|
||||
|
||||
### Statistics
|
||||
|
||||
The database keeps a running count of many of the internal operations it
|
||||
performs so you can better understand what's going on. By grabbing a snapshot
|
||||
of these stats at two points in time we can see what operations were performed
|
||||
in that time range.
|
||||
|
||||
For example, we could start a goroutine to log stats every 10 seconds:
|
||||
|
||||
```go
|
||||
go func() {
|
||||
// Grab the initial stats.
|
||||
prev := db.Stats()
|
||||
|
||||
for {
|
||||
// Wait for 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
// Grab the current stats and diff them.
|
||||
stats := db.Stats()
|
||||
diff := stats.Sub(&prev)
|
||||
|
||||
// Encode stats to JSON and print to STDERR.
|
||||
json.NewEncoder(os.Stderr).Encode(diff)
|
||||
|
||||
// Save stats for the next loop.
|
||||
prev = stats
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
It's also useful to pipe these stats to a service such as statsd for monitoring
|
||||
or to provide an HTTP endpoint that will perform a fixed-length sample.
|
||||
|
||||
|
||||
### Read-Only Mode
|
||||
|
||||
Sometimes it is useful to create a shared, read-only Bolt database. To this,
|
||||
set the `Options.ReadOnly` flag when opening your database. Read-only mode
|
||||
uses a shared lock to allow multiple processes to read from the database but
|
||||
it will block any processes from opening the database in read-write mode.
|
||||
|
||||
```go
|
||||
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
### Mobile Use (iOS/Android)
|
||||
|
||||
Bolt is able to run on mobile devices by leveraging the binding feature of the
|
||||
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
|
||||
contain your database logic and a reference to a `*bolt.DB` with a initializing
|
||||
constructor that takes in a filepath where the database file will be stored.
|
||||
Neither Android nor iOS require extra permissions or cleanup from using this method.
|
||||
|
||||
```go
|
||||
func NewBoltDB(filepath string) *BoltDB {
|
||||
db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return &BoltDB{db}
|
||||
}
|
||||
|
||||
type BoltDB struct {
|
||||
db *bolt.DB
|
||||
...
|
||||
}
|
||||
|
||||
func (b *BoltDB) Path() string {
|
||||
return b.db.Path()
|
||||
}
|
||||
|
||||
func (b *BoltDB) Close() {
|
||||
b.db.Close()
|
||||
}
|
||||
```
|
||||
|
||||
Database logic should be defined as methods on this wrapper struct.
|
||||
|
||||
To initialize this struct from the native language (both platforms now sync
|
||||
their local storage to the cloud. These snippets disable that functionality for the
|
||||
database file):
|
||||
|
||||
#### Android
|
||||
|
||||
```java
|
||||
String path;
|
||||
if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
|
||||
path = getNoBackupFilesDir().getAbsolutePath();
|
||||
} else{
|
||||
path = getFilesDir().getAbsolutePath();
|
||||
}
|
||||
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
|
||||
```
|
||||
|
||||
#### iOS
|
||||
|
||||
```objc
|
||||
- (void)demo {
|
||||
NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
|
||||
NSUserDomainMask,
|
||||
YES) objectAtIndex:0];
|
||||
GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
|
||||
[self addSkipBackupAttributeToItemAtPath:demo.path];
|
||||
//Some DB Logic would go here
|
||||
[demo close];
|
||||
}
|
||||
|
||||
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
|
||||
{
|
||||
NSURL* URL= [NSURL fileURLWithPath: filePathString];
|
||||
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
|
||||
|
||||
NSError *error = nil;
|
||||
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
|
||||
forKey: NSURLIsExcludedFromBackupKey error: &error];
|
||||
if(!success){
|
||||
NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
For more information on getting started with Bolt, check out the following articles:
|
||||
|
||||
* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
|
||||
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
|
||||
|
||||
|
||||
## Comparison with other databases

### Postgres, MySQL, & other relational databases

Relational databases structure data into rows and are only accessible through
the use of SQL. This approach provides flexibility in how you store and query
your data but also incurs overhead in parsing and planning SQL statements. Bolt
accesses all data by a byte slice key. This makes Bolt fast to read and write
data by key but provides no built-in support for joining values together.

Most relational databases (with the exception of SQLite) are standalone servers
that run separately from your application. This gives your systems
flexibility to connect multiple application servers to a single database
server but also adds overhead in serializing and transporting data over the
network. Bolt runs as a library included in your application so all data access
has to go through your application's process. This brings data closer to your
application but limits multi-process access to the data.

### LevelDB, RocksDB

LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
they are libraries bundled into the application; however, their underlying
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
random writes by using a write-ahead log and multi-tiered, sorted files called
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
have trade-offs.

If you require a high random write throughput (>10,000 w/sec) or you need to use
spinning disks then LevelDB could be a good choice. If your application is
read-heavy or does a lot of range scans then Bolt could be a good choice.

One other important consideration is that LevelDB does not have transactions.
It supports batch writing of key/value pairs and it supports read snapshots,
but it will not give you the ability to do a compare-and-swap operation safely.
Bolt supports fully serializable ACID transactions.

### LMDB

Bolt was originally a port of LMDB so it is architecturally similar. Both use
a B+tree, have ACID semantics with fully serializable transactions, and support
lock-free MVCC using a single writer and multiple readers.

The two projects have somewhat diverged. LMDB heavily focuses on raw performance
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
several unsafe actions such as direct writes for the sake of performance. Bolt
opts to disallow actions which can leave the database in a corrupted state. The
only exception to this in Bolt is `DB.NoSync`.

There are also a few differences in API. LMDB requires a maximum mmap size when
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
automatically. LMDB overloads the getter and setter functions with multiple
flags whereas Bolt splits these specialized cases into their own functions.

## Caveats & Limitations

It's important to pick the right tool for the job and Bolt is no exception.
Here are a few things to note when evaluating and using Bolt:

* Bolt is good for read intensive workloads. Sequential write performance is
  also fast but random writes can be slow. You can use `DB.Batch()` or add a
  write-ahead log to help mitigate this issue.

* Bolt uses a B+tree internally so there can be a lot of random page access.
  SSDs provide a significant performance boost over spinning disks.

* Try to avoid long running read transactions. Bolt uses copy-on-write so
  old pages cannot be reclaimed while an old transaction is using them.

* Byte slices returned from Bolt are only valid during a transaction. Once the
  transaction has been committed or rolled back then the memory they point to
  can be reused by a new page or can be unmapped from virtual memory and you'll
  see an `unexpected fault address` panic when accessing it. (A sketch of the
  safe copying pattern follows at the end of this section.)

* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
  buckets that have random inserts will cause your database to have very poor
  page utilization.

* Use larger buckets in general. Smaller buckets cause poor page utilization
  once they become larger than the page size (typically 4KB).

* Bulk loading a lot of random writes into a new bucket can be slow as the
  page will not split until the transaction is committed. Randomly inserting
  more than 100,000 key/value pairs into a single new bucket in a single
  transaction is not advised.

* Bolt uses a memory-mapped file so the underlying operating system handles the
  caching of the data. Typically, the OS will cache as much of the file as it
  can in memory and will release memory as needed to other processes. This means
  that Bolt can show very high memory usage when working with large databases.
  However, this is expected and the OS will release memory as needed. Bolt can
  handle databases much larger than the available physical RAM, provided its
  memory-map fits in the process virtual address space. It may be problematic
  on 32-bit systems.

* The data structures in the Bolt database are memory mapped so the data file
  will be endian specific. This means that you cannot copy a Bolt file from a
  little endian machine to a big endian machine and have it work. For most
  users this is not a concern since most modern CPUs are little endian.

* Because of the way pages are laid out on disk, Bolt cannot truncate data files
  and return free pages back to the disk. Instead, Bolt maintains a free list
  of unused pages within its data file. These free pages can be reused by later
  transactions. This works well for many use cases as databases generally tend
  to grow. However, it's important to note that deleting large chunks of data
  will not allow you to reclaim that space on disk.

For more information on page allocation, [see this comment][page-allocation].

[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
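
As a sketch of the byte-slice caveat above: copy any value you need to keep
beyond the transaction. This example is illustrative only; the `readNote`
helper and the `notes` bucket are hypothetical, not part of Bolt's API:

```go
// readNote copies a value out of the database so it remains valid
// after the transaction ends. Bucket and key names are hypothetical.
func readNote(db *bolt.DB, id string) ([]byte, error) {
	var out []byte
	err := db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket([]byte("notes"))
		if bkt == nil {
			return nil // nothing stored yet
		}
		// Get returns a slice pointing into the mmap'd file; it is only
		// valid inside this transaction, so append copies it out here.
		out = append(out, bkt.Get([]byte(id))...)
		return nil
	})
	return out, err
}
```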

## Reading the Source

Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.

The best places to start are the main entry points into Bolt:

- `Open()` - Initializes the reference to the database. It's responsible for
  creating the database if it doesn't exist, obtaining an exclusive lock on the
  file, reading the meta pages, & memory-mapping the file.

- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
  value of the `writable` argument. This requires briefly obtaining the "meta"
  lock to keep track of open transactions. Only one read-write transaction can
  exist at a time so the "rwlock" is acquired during the life of a read-write
  transaction.

- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
  arguments, a cursor is used to traverse the B+tree to the page and position
  where the key & value will be written. Once the position is found, the bucket
  materializes the underlying page and the page's parent pages into memory as
  "nodes". These nodes are where mutations occur during read-write transactions.
  These changes get flushed to disk during commit.

- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
  to move to the page & position of a key/value pair. During a read-only
  transaction, the key and value data is returned as a direct reference to the
  underlying mmap file so there's no allocation overhead. For read-write
  transactions, this data may reference the mmap file or one of the in-memory
  node values.

- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
  or in-memory nodes. It can seek to a specific key, move to the first or last
  value, or it can move forward or backward. The cursor handles the movement up
  and down the B+tree transparently to the end user.

- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
  into pages to be written to disk. Writing to disk then occurs in two phases.
  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
  new meta page with an incremented transaction ID is written and another
  `fsync()` occurs. This two-phase write ensures that partially written data
  pages are ignored in the event of a crash since the meta page pointing to them
  is never written. Partially written meta pages are invalidated because they
  are written with a checksum.
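
To see those entry points in action, here is a minimal, self-contained sketch;
the file name, bucket name, and key are invented for the example, and the
import path assumes the `coreos/bbolt` fork vendored by this commit:

```go
package main

import (
	"fmt"
	"log"

	bolt "github.com/coreos/bbolt"
)

func main() {
	// Open() creates the file if needed, locks it, and memory-maps it.
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update() wraps DB.Begin(true) and Tx.Commit(); Put() goes through a cursor.
	err = db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("demo"))
		if err != nil {
			return err
		}
		return bkt.Put([]byte("hello"), []byte("world"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// View() wraps DB.Begin(false); Get() walks the B+tree via a cursor.
	_ = db.View(func(tx *bolt.Tx) error {
		fmt.Printf("hello=%s\n", tx.Bucket([]byte("demo")).Get([]byte("hello")))
		return nil
	})
}
```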

If you have additional notes that could be helpful for others, please submit
them via pull request.

## Other Projects Using Bolt

Below is a list of public, open source projects that use Bolt:

* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - A persistence manager for objects backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - A bolt wrapper streamlining simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to the [RequestBin](http://requestb.in/) service.
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.

If you are using Bolt in a project, please send a pull request to add it to the list.
18
vendor/github.com/coreos/bbolt/appveyor.yml
generated
vendored
Normal file
@@ -0,0 +1,18 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\boltdb\bolt

environment:
  GOPATH: c:\gopath

install:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go get -v -t ./...

build_script:
  - go test -v ./...
7
vendor/github.com/coreos/bbolt/bolt_386.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
7
vendor/github.com/coreos/bbolt/bolt_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
7
vendor/github.com/coreos/bbolt/bolt_arm.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
9
vendor/github.com/coreos/bbolt/bolt_arm64.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
// +build arm64

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
10
vendor/github.com/coreos/bbolt/bolt_linux.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
package bolt

import (
	"syscall"
)

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return syscall.Fdatasync(int(db.file.Fd()))
}
27
vendor/github.com/coreos/bbolt/bolt_openbsd.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
package bolt

import (
	"syscall"
	"unsafe"
)

const (
	msAsync      = 1 << iota // perform asynchronous writes
	msSync                   // perform synchronous writes
	msInvalidate             // invalidate cached data
)

func msync(db *DB) error {
	_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
	if errno != 0 {
		return errno
	}
	return nil
}

func fdatasync(db *DB) error {
	if db.data != nil {
		return msync(db)
	}
	return db.file.Sync()
}
9
vendor/github.com/coreos/bbolt/bolt_ppc.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
// +build ppc

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
9
vendor/github.com/coreos/bbolt/bolt_ppc64.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
// +build ppc64

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
9
vendor/github.com/coreos/bbolt/bolt_ppc64le.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
// +build ppc64le

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
9
vendor/github.com/coreos/bbolt/bolt_s390x.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
// +build s390x

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
89
vendor/github.com/coreos/bbolt/bolt_unix.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
// +build !windows,!plan9,!solaris

package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"
)

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		flag := syscall.LOCK_SH
		if exclusive {
			flag = syscall.LOCK_EX
		}

		// Otherwise attempt to obtain an exclusive lock.
		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
		if err == nil {
			return nil
		} else if err != syscall.EWOULDBLOCK {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
}

// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}

	// Advise the kernel that the mmap is accessed randomly.
	if err := madvise(b, syscall.MADV_RANDOM); err != nil {
		return fmt.Errorf("madvise: %s", err)
	}

	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}

	// Unmap using the original byte slice.
	err := syscall.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}

// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
	if e1 != 0 {
		err = e1
	}
	return
}
90
vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		var lock syscall.Flock_t
		lock.Start = 0
		lock.Len = 0
		lock.Pid = 0
		lock.Whence = 0
		lock.Pid = 0
		if exclusive {
			lock.Type = syscall.F_WRLCK
		} else {
			lock.Type = syscall.F_RDLCK
		}
		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
		if err == nil {
			return nil
		} else if err != syscall.EAGAIN {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	var lock syscall.Flock_t
	lock.Start = 0
	lock.Len = 0
	lock.Type = syscall.F_UNLCK
	lock.Whence = 0
	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}

// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}

	// Advise the kernel that the mmap is accessed randomly.
	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
		return fmt.Errorf("madvise: %s", err)
	}

	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}

	// Unmap using the original byte slice.
	err := unix.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}
144
vendor/github.com/coreos/bbolt/bolt_windows.go
generated
vendored
Normal file
@@ -0,0 +1,144 @@
package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"
)

// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
	procLockFileEx   = modkernel32.NewProc("LockFileEx")
	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)

const (
	lockExt = ".lock"

	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
	flagLockExclusive       = 2
	flagLockFailImmediately = 1

	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
	errLockViolation syscall.Errno = 0x21
)

func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
	if r == 0 {
		return err
	}
	return nil
}

func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
	if r == 0 {
		return err
	}
	return nil
}

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return db.file.Sync()
}

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	// Create a separate lock file on windows because a process
	// cannot share an exclusive lock on the same file. This is
	// needed during Tx.WriteTo().
	f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
	if err != nil {
		return err
	}
	db.lockfile = f

	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}

		var flag uint32 = flagLockFailImmediately
		if exclusive {
			flag |= flagLockExclusive
		}

		err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
		if err == nil {
			return nil
		} else if err != errLockViolation {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
	db.lockfile.Close()
	os.Remove(db.path + lockExt)
	return err
}

// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
	if !db.readOnly {
		// Truncate the database to the size of the mmap.
		if err := db.file.Truncate(int64(sz)); err != nil {
			return fmt.Errorf("truncate: %s", err)
		}
	}

	// Open a file mapping handle.
	sizelo := uint32(sz >> 32)
	sizehi := uint32(sz) & 0xffffffff
	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
	if h == 0 {
		return os.NewSyscallError("CreateFileMapping", errno)
	}

	// Create the memory map.
	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
	if addr == 0 {
		return os.NewSyscallError("MapViewOfFile", errno)
	}

	// Close mapping handle.
	if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
		return os.NewSyscallError("CloseHandle", err)
	}

	// Convert to a byte array.
	db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
	db.datasz = sz

	return nil
}

// munmap unmaps a pointer from a file.
// Based on: https://github.com/edsrzf/mmap-go
func munmap(db *DB) error {
	if db.data == nil {
		return nil
	}

	addr := (uintptr)(unsafe.Pointer(&db.data[0]))
	if err := syscall.UnmapViewOfFile(addr); err != nil {
		return os.NewSyscallError("UnmapViewOfFile", err)
	}
	return nil
}
8
vendor/github.com/coreos/bbolt/boltsync_unix.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
// +build !windows,!plan9,!linux,!openbsd

package bolt

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return db.file.Sync()
}
748
vendor/github.com/coreos/bbolt/bucket.go
generated
vendored
Normal file
@@ -0,0 +1,748 @@
package bolt

import (
	"bytes"
	"fmt"
	"unsafe"
)

const (
	// MaxKeySize is the maximum length of a key, in bytes.
	MaxKeySize = 32768

	// MaxValueSize is the maximum length of a value, in bytes.
	MaxValueSize = (1 << 31) - 2
)

const (
	maxUint = ^uint(0)
	minUint = 0
	maxInt  = int(^uint(0) >> 1)
	minInt  = -maxInt - 1
)

const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))

const (
	minFillPercent = 0.1
	maxFillPercent = 1.0
)

// DefaultFillPercent is the percentage that split pages are filled.
// This value can be changed by setting Bucket.FillPercent.
const DefaultFillPercent = 0.5

// Bucket represents a collection of key/value pairs inside the database.
type Bucket struct {
	*bucket
	tx       *Tx                // the associated transaction
	buckets  map[string]*Bucket // subbucket cache
	page     *page              // inline page reference
	rootNode *node              // materialized node for the root page.
	nodes    map[pgid]*node     // node cache

	// Sets the threshold for filling nodes when they split. By default,
	// the bucket will fill to 50% but it can be useful to increase this
	// amount if you know that your write workloads are mostly append-only.
	//
	// This is non-persisted across transactions so it must be set in every Tx.
	FillPercent float64
}

// bucket represents the on-file representation of a bucket.
// This is stored as the "value" of a bucket key. If the bucket is small enough,
// then its root page can be stored inline in the "value", after the bucket
// header. In the case of inline buckets, the "root" will be 0.
type bucket struct {
	root     pgid   // page id of the bucket's root-level page
	sequence uint64 // monotonically incrementing, used by NextSequence()
}

// newBucket returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket {
	var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
	if tx.writable {
		b.buckets = make(map[string]*Bucket)
		b.nodes = make(map[pgid]*node)
	}
	return b
}

// Tx returns the tx of the bucket.
func (b *Bucket) Tx() *Tx {
	return b.tx
}

// Root returns the root of the bucket.
func (b *Bucket) Root() pgid {
	return b.root
}

// Writable returns whether the bucket is writable.
func (b *Bucket) Writable() bool {
	return b.tx.writable
}

// Cursor creates a cursor associated with the bucket.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (b *Bucket) Cursor() *Cursor {
	// Update transaction statistics.
	b.tx.stats.CursorCount++

	// Allocate and return a cursor.
	return &Cursor{
		bucket: b,
		stack:  make([]elemRef, 0),
	}
}

// Bucket retrieves a nested bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) Bucket(name []byte) *Bucket {
	if b.buckets != nil {
		if child := b.buckets[string(name)]; child != nil {
			return child
		}
	}

	// Move cursor to key.
	c := b.Cursor()
	k, v, flags := c.seek(name)

	// Return nil if the key doesn't exist or it is not a bucket.
	if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
		return nil
	}

	// Otherwise create a bucket and cache it.
	var child = b.openBucket(v)
	if b.buckets != nil {
		b.buckets[string(name)] = child
	}

	return child
}

// Helper method that re-interprets a sub-bucket value
// from a parent into a Bucket
func (b *Bucket) openBucket(value []byte) *Bucket {
	var child = newBucket(b.tx)

	// If this is a writable transaction then we need to copy the bucket entry.
	// Read-only transactions can point directly at the mmap entry.
	if b.tx.writable {
		child.bucket = &bucket{}
		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
	} else {
		child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
	}

	// Save a reference to the inline page if the bucket is inline.
	if child.root == 0 {
		child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
	}

	return &child
}

// CreateBucket creates a new bucket at the given key and returns the new bucket.
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
	if b.tx.db == nil {
		return nil, ErrTxClosed
	} else if !b.tx.writable {
		return nil, ErrTxNotWritable
	} else if len(key) == 0 {
		return nil, ErrBucketNameRequired
	}

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(key)

	// Return an error if there is an existing key.
	if bytes.Equal(key, k) {
		if (flags & bucketLeafFlag) != 0 {
			return nil, ErrBucketExists
		} else {
			return nil, ErrIncompatibleValue
		}
	}

	// Create empty, inline bucket.
	var bucket = Bucket{
		bucket:      &bucket{},
		rootNode:    &node{isLeaf: true},
		FillPercent: DefaultFillPercent,
	}
	var value = bucket.write()

	// Insert into node.
	key = cloneBytes(key)
	c.node().put(key, key, value, 0, bucketLeafFlag)

	// Since subbuckets are not allowed on inline buckets, we need to
	// dereference the inline page, if it exists. This will cause the bucket
	// to be treated as a regular, non-inline bucket for the rest of the tx.
	b.page = nil

	return b.Bucket(key), nil
}

// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
	child, err := b.CreateBucket(key)
	if err == ErrBucketExists {
		return b.Bucket(key), nil
	} else if err != nil {
		return nil, err
	}
	return child, nil
}

// DeleteBucket deletes a bucket at the given key.
// Returns an error if the bucket does not exists, or if the key represents a non-bucket value.
func (b *Bucket) DeleteBucket(key []byte) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	}

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(key)

	// Return an error if bucket doesn't exist or is not a bucket.
	if !bytes.Equal(key, k) {
		return ErrBucketNotFound
	} else if (flags & bucketLeafFlag) == 0 {
		return ErrIncompatibleValue
	}

	// Recursively delete all child buckets.
	child := b.Bucket(key)
	err := child.ForEach(func(k, v []byte) error {
		if v == nil {
			if err := child.DeleteBucket(k); err != nil {
				return fmt.Errorf("delete bucket: %s", err)
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Remove cached copy.
	delete(b.buckets, string(key))

	// Release all bucket pages to freelist.
	child.nodes = nil
	child.rootNode = nil
	child.free()

	// Delete the node if we have a matching key.
	c.node().del(key)

	return nil
}

// Get retrieves the value for a key in the bucket.
// Returns a nil value if the key does not exist or if the key is a nested bucket.
// The returned value is only valid for the life of the transaction.
func (b *Bucket) Get(key []byte) []byte {
	k, v, flags := b.Cursor().seek(key)

	// Return nil if this is a bucket.
	if (flags & bucketLeafFlag) != 0 {
		return nil
	}

	// If our target node isn't the same key as what's passed in then return nil.
	if !bytes.Equal(key, k) {
		return nil
	}
	return v
}

// Put sets the value for a key in the bucket.
// If the key exist then its previous value will be overwritten.
// Supplied value must remain valid for the life of the transaction.
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
func (b *Bucket) Put(key []byte, value []byte) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	} else if len(key) == 0 {
		return ErrKeyRequired
	} else if len(key) > MaxKeySize {
		return ErrKeyTooLarge
	} else if int64(len(value)) > MaxValueSize {
		return ErrValueTooLarge
	}

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(key)

	// Return an error if there is an existing key with a bucket value.
	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	}

	// Insert into node.
	key = cloneBytes(key)
	c.node().put(key, key, value, 0, 0)

	return nil
}

// Delete removes a key from the bucket.
// If the key does not exist then nothing is done and a nil error is returned.
// Returns an error if the bucket was created from a read-only transaction.
func (b *Bucket) Delete(key []byte) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	}

	// Move cursor to correct position.
	c := b.Cursor()
	_, _, flags := c.seek(key)

	// Return an error if there is already existing bucket value.
	if (flags & bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	}

	// Delete the node if we have a matching key.
	c.node().del(key)

	return nil
}

// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
	if b.tx.db == nil {
		return 0, ErrTxClosed
	} else if !b.Writable() {
		return 0, ErrTxNotWritable
	}

	// Materialize the root node if it hasn't been already so that the
	// bucket will be saved during commit.
	if b.rootNode == nil {
		_ = b.node(b.root, nil)
	}

	// Increment and return the sequence.
	b.bucket.sequence++
	return b.bucket.sequence, nil
}

// ForEach executes a function for each key/value pair in a bucket.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
	if b.tx.db == nil {
		return ErrTxClosed
	}
	c := b.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		if err := fn(k, v); err != nil {
			return err
		}
	}
	return nil
}

// Stat returns stats on a bucket.
func (b *Bucket) Stats() BucketStats {
	var s, subStats BucketStats
	pageSize := b.tx.db.pageSize
	s.BucketN += 1
	if b.root == 0 {
		s.InlineBucketN += 1
	}
	b.forEachPage(func(p *page, depth int) {
		if (p.flags & leafPageFlag) != 0 {
			s.KeyN += int(p.count)

			// used totals the used bytes for the page
			used := pageHeaderSize

			if p.count != 0 {
				// If page has any elements, add all element headers.
				used += leafPageElementSize * int(p.count-1)

				// Add all element key, value sizes.
				// The computation takes advantage of the fact that the position
				// of the last element's key/value equals to the total of the sizes
				// of all previous elements' keys and values.
				// It also includes the last element's header.
				lastElement := p.leafPageElement(p.count - 1)
				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
			}

			if b.root == 0 {
				// For inlined bucket just update the inline stats
				s.InlineBucketInuse += used
			} else {
				// For non-inlined bucket update all the leaf stats
				s.LeafPageN++
				s.LeafInuse += used
				s.LeafOverflowN += int(p.overflow)

				// Collect stats from sub-buckets.
				// Do that by iterating over all element headers
				// looking for the ones with the bucketLeafFlag.
				for i := uint16(0); i < p.count; i++ {
					e := p.leafPageElement(i)
					if (e.flags & bucketLeafFlag) != 0 {
						// For any bucket element, open the element value
						// and recursively call Stats on the contained bucket.
						subStats.Add(b.openBucket(e.value()).Stats())
					}
				}
			}
		} else if (p.flags & branchPageFlag) != 0 {
			s.BranchPageN++
			lastElement := p.branchPageElement(p.count - 1)

			// used totals the used bytes for the page
			// Add header and all element headers.
			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))

			// Add size of all keys and values.
			// Again, use the fact that last element's position equals to
			// the total of key, value sizes of all previous elements.
			used += int(lastElement.pos + lastElement.ksize)
			s.BranchInuse += used
			s.BranchOverflowN += int(p.overflow)
		}

		// Keep track of maximum page depth.
		if depth+1 > s.Depth {
			s.Depth = (depth + 1)
		}
	})

	// Alloc stats can be computed from page counts and pageSize.
	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize

	// Add the max depth of sub-buckets to get total nested depth.
	s.Depth += subStats.Depth
	// Add the stats for all sub-buckets
	s.Add(subStats)
	return s
}

// forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
	// If we have an inline page then just use that.
	if b.page != nil {
		fn(b.page, 0)
		return
	}

	// Otherwise traverse the page hierarchy.
	b.tx.forEachPage(b.root, 0, fn)
}

// forEachPageNode iterates over every page (or node) in a bucket.
// This also includes inline pages.
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
	// If we have an inline page or root node then just use that.
	if b.page != nil {
		fn(b.page, nil, 0)
		return
	}
	b._forEachPageNode(b.root, 0, fn)
}

func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
	var p, n = b.pageNode(pgid)

	// Execute function.
	fn(p, n, depth)

	// Recursively loop over children.
	if p != nil {
		if (p.flags & branchPageFlag) != 0 {
			for i := 0; i < int(p.count); i++ {
				elem := p.branchPageElement(uint16(i))
				b._forEachPageNode(elem.pgid, depth+1, fn)
			}
		}
	} else {
		if !n.isLeaf {
			for _, inode := range n.inodes {
				b._forEachPageNode(inode.pgid, depth+1, fn)
			}
		}
	}
}

// spill writes all the nodes for this bucket to dirty pages.
func (b *Bucket) spill() error {
	// Spill all child buckets first.
	for name, child := range b.buckets {
		// If the child bucket is small enough and it has no child buckets then
		// write it inline into the parent bucket's page. Otherwise spill it
		// like a normal bucket and make the parent value a pointer to the page.
		var value []byte
		if child.inlineable() {
			child.free()
			value = child.write()
		} else {
			if err := child.spill(); err != nil {
				return err
			}

			// Update the child bucket header in this bucket.
			value = make([]byte, unsafe.Sizeof(bucket{}))
			var bucket = (*bucket)(unsafe.Pointer(&value[0]))
			*bucket = *child.bucket
		}

		// Skip writing the bucket if there are no materialized nodes.
		if child.rootNode == nil {
			continue
		}

		// Update parent node.
		var c = b.Cursor()
		k, _, flags := c.seek([]byte(name))
		if !bytes.Equal([]byte(name), k) {
			panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
		}
		if flags&bucketLeafFlag == 0 {
			panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
		}
		c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
	}

	// Ignore if there's not a materialized root node.
	if b.rootNode == nil {
		return nil
	}

	// Spill nodes.
	if err := b.rootNode.spill(); err != nil {
		return err
	}
	b.rootNode = b.rootNode.root()

	// Update the root node for this bucket.
	if b.rootNode.pgid >= b.tx.meta.pgid {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
	}
	b.root = b.rootNode.pgid

	return nil
}

// inlineable returns true if a bucket is small enough to be written inline
// and if it contains no subbuckets. Otherwise returns false.
func (b *Bucket) inlineable() bool {
	var n = b.rootNode

	// Bucket must only contain a single leaf node.
	if n == nil || !n.isLeaf {
		return false
	}

	// Bucket is not inlineable if it contains subbuckets or if it goes beyond
	// our threshold for inline bucket size.
	var size = pageHeaderSize
	for _, inode := range n.inodes {
		size += leafPageElementSize + len(inode.key) + len(inode.value)

		if inode.flags&bucketLeafFlag != 0 {
			return false
		} else if size > b.maxInlineBucketSize() {
			return false
		}
	}

	return true
}

// Returns the maximum total size of a bucket to make it a candidate for inlining.
func (b *Bucket) maxInlineBucketSize() int {
	return b.tx.db.pageSize / 4
}

// write allocates and writes a bucket to a byte slice.
func (b *Bucket) write() []byte {
	// Allocate the appropriate size.
	var n = b.rootNode
	var value = make([]byte, bucketHeaderSize+n.size())

	// Write a bucket header.
	var bucket = (*bucket)(unsafe.Pointer(&value[0]))
	*bucket = *b.bucket

	// Convert byte slice to a fake page and write the root node.
	var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
	n.write(p)

	return value
}

// rebalance attempts to balance all nodes.
func (b *Bucket) rebalance() {
	for _, n := range b.nodes {
		n.rebalance()
	}
	for _, child := range b.buckets {
		child.rebalance()
	}
}

// node creates a node from a page and associates it with a given parent.
func (b *Bucket) node(pgid pgid, parent *node) *node {
	_assert(b.nodes != nil, "nodes map expected")

	// Retrieve node if it's already been created.
	if n := b.nodes[pgid]; n != nil {
		return n
	}

	// Otherwise create a node and cache it.
	n := &node{bucket: b, parent: parent}
	if parent == nil {
		b.rootNode = n
	} else {
		parent.children = append(parent.children, n)
	}

	// Use the inline page if this is an inline bucket.
	var p = b.page
	if p == nil {
		p = b.tx.page(pgid)
	}

	// Read the page into the node and cache it.
	n.read(p)
	b.nodes[pgid] = n

	// Update statistics.
	b.tx.stats.NodeCount++

	return n
}

// free recursively frees all pages in the bucket.
func (b *Bucket) free() {
	if b.root == 0 {
		return
	}

	var tx = b.tx
	b.forEachPageNode(func(p *page, n *node, _ int) {
		if p != nil {
			tx.db.freelist.free(tx.meta.txid, p)
		} else {
			n.free()
		}
	})
	b.root = 0
}

// dereference removes all references to the old mmap.
func (b *Bucket) dereference() {
	if b.rootNode != nil {
		b.rootNode.root().dereference()
	}

	for _, child := range b.buckets {
		child.dereference()
	}
}

// pageNode returns the in-memory node, if it exists.
// Otherwise returns the underlying page.
func (b *Bucket) pageNode(id pgid) (*page, *node) {
	// Inline buckets have a fake page embedded in their value so treat them
	// differently. We'll return the rootNode (if available) or the fake page.
	if b.root == 0 {
		if id != 0 {
			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
		}
		if b.rootNode != nil {
			return nil, b.rootNode
		}
		return b.page, nil
	}

	// Check the node cache for non-inline buckets.
	if b.nodes != nil {
		if n := b.nodes[id]; n != nil {
			return nil, n
		}
	}

	// Finally lookup the page from the transaction if no node is materialized.
	return b.tx.page(id), nil
}

// BucketStats records statistics about resources used by a bucket.
type BucketStats struct {
	// Page count statistics.
	BranchPageN     int // number of logical branch pages
	BranchOverflowN int // number of physical branch overflow pages
	LeafPageN       int // number of logical leaf pages
	LeafOverflowN   int // number of physical leaf overflow pages

	// Tree statistics.
	KeyN  int // number of keys/value pairs
	Depth int // number of levels in B+tree

	// Page size utilization.
	BranchAlloc int // bytes allocated for physical branch pages
	BranchInuse int // bytes actually used for branch data
	LeafAlloc   int // bytes allocated for physical leaf pages
	LeafInuse   int // bytes actually used for leaf data

	// Bucket statistics
	BucketN           int // total number of buckets including the top bucket
	InlineBucketN     int // total number on inlined buckets
	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
}

func (s *BucketStats) Add(other BucketStats) {
	s.BranchPageN += other.BranchPageN
	s.BranchOverflowN += other.BranchOverflowN
	s.LeafPageN += other.LeafPageN
	s.LeafOverflowN += other.LeafOverflowN
	s.KeyN += other.KeyN
	if s.Depth < other.Depth {
		s.Depth = other.Depth
	}
	s.BranchAlloc += other.BranchAlloc
	s.BranchInuse += other.BranchInuse
	s.LeafAlloc += other.LeafAlloc
	s.LeafInuse += other.LeafInuse

	s.BucketN += other.BucketN
	s.InlineBucketN += other.InlineBucketN
	s.InlineBucketInuse += other.InlineBucketInuse
}

// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
	var clone = make([]byte, len(v))
	copy(clone, v)
	return clone
}
400
vendor/github.com/coreos/bbolt/cursor.go
generated
vendored
Normal file
400
vendor/github.com/coreos/bbolt/cursor.go
generated
vendored
Normal file
@@ -0,0 +1,400 @@
package bolt

import (
	"bytes"
	"fmt"
	"sort"
)

// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
// Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
//
// Keys and values returned from the cursor are only valid for the life of the transaction.
//
// Changing data while traversing with a cursor may cause it to be invalidated
// and return unexpected keys and/or values. You must reposition your cursor
// after mutating data.
type Cursor struct {
	bucket *Bucket
	stack  []elemRef
}

// Bucket returns the bucket that this cursor was created from.
func (c *Cursor) Bucket() *Bucket {
	return c.bucket
}

// First moves the cursor to the first item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) First() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	c.stack = c.stack[:0]
	p, n := c.bucket.pageNode(c.bucket.root)
	c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
	c.first()

	// If we land on an empty page then move to the next value.
	// https://github.com/boltdb/bolt/issues/450
	if c.stack[len(c.stack)-1].count() == 0 {
		c.next()
	}

	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Last moves the cursor to the last item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Last() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	c.stack = c.stack[:0]
	p, n := c.bucket.pageNode(c.bucket.root)
	ref := elemRef{page: p, node: n}
	ref.index = ref.count() - 1
	c.stack = append(c.stack, ref)
	c.last()
	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Next moves the cursor to the next item in the bucket and returns its key and value.
// If the cursor is at the end of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Next() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	k, v, flags := c.next()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")

	// Attempt to move back one element until we're successful.
	// Move up the stack as we hit the beginning of each page in our stack.
	for i := len(c.stack) - 1; i >= 0; i-- {
		elem := &c.stack[i]
		if elem.index > 0 {
			elem.index--
			break
		}
		c.stack = c.stack[:i]
	}

	// If we've hit the end then return nil.
	if len(c.stack) == 0 {
		return nil, nil
	}

	// Move down the stack to find the last element of the last leaf under this branch.
	c.last()
	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used. If no keys
// follow, a nil key is returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
	k, v, flags := c.seek(seek)

	// If we ended up after the last element of a page then move to the next one.
	if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
		k, v, flags = c.next()
	}

	if k == nil {
		return nil, nil
	} else if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Delete removes the current key/value under the cursor from the bucket.
// Delete fails if current key/value is a bucket or if the transaction is not writable.
func (c *Cursor) Delete() error {
	if c.bucket.tx.db == nil {
		return ErrTxClosed
	} else if !c.bucket.Writable() {
		return ErrTxNotWritable
	}

	key, _, flags := c.keyValue()
	// Return an error if current value is a bucket.
	if (flags & bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	}
	c.node().del(key)

	return nil
}

// seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used.
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
	_assert(c.bucket.tx.db != nil, "tx closed")

	// Start from root page/node and traverse to correct page.
	c.stack = c.stack[:0]
	c.search(seek, c.bucket.root)
	ref := &c.stack[len(c.stack)-1]

	// If the cursor is pointing to the end of page/node then return nil.
	if ref.index >= ref.count() {
		return nil, nil, 0
	}

	// If this is a bucket then return a nil value.
	return c.keyValue()
}

// first moves the cursor to the first leaf element under the last page in the stack.
func (c *Cursor) first() {
	for {
		// Exit when we hit a leaf page.
		var ref = &c.stack[len(c.stack)-1]
		if ref.isLeaf() {
			break
		}

		// Keep adding pages pointing to the first element to the stack.
		var pgid pgid
		if ref.node != nil {
			pgid = ref.node.inodes[ref.index].pgid
		} else {
			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
		}
		p, n := c.bucket.pageNode(pgid)
		c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
	}
}

// last moves the cursor to the last leaf element under the last page in the stack.
func (c *Cursor) last() {
	for {
		// Exit when we hit a leaf page.
		ref := &c.stack[len(c.stack)-1]
		if ref.isLeaf() {
			break
		}

		// Keep adding pages pointing to the last element in the stack.
		var pgid pgid
		if ref.node != nil {
			pgid = ref.node.inodes[ref.index].pgid
		} else {
			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
		}
		p, n := c.bucket.pageNode(pgid)

		var nextRef = elemRef{page: p, node: n}
		nextRef.index = nextRef.count() - 1
		c.stack = append(c.stack, nextRef)
	}
}

// next moves to the next leaf element and returns the key and value.
// If the cursor is at the last leaf element then it stays there and returns nil.
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
	for {
		// Attempt to move over one element until we're successful.
		// Move up the stack as we hit the end of each page in our stack.
		var i int
		for i = len(c.stack) - 1; i >= 0; i-- {
			elem := &c.stack[i]
			if elem.index < elem.count()-1 {
				elem.index++
				break
			}
		}

		// If we've hit the root page then stop and return. This will leave the
		// cursor on the last element of the last page.
		if i == -1 {
			return nil, nil, 0
		}

		// Otherwise start from where we left off in the stack and find the
		// first element of the first leaf page.
		c.stack = c.stack[:i+1]
		c.first()

		// If this is an empty page then restart and move back up the stack.
		// https://github.com/boltdb/bolt/issues/450
		if c.stack[len(c.stack)-1].count() == 0 {
			continue
		}

		return c.keyValue()
	}
}

// search recursively performs a binary search against a given page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) {
	p, n := c.bucket.pageNode(pgid)
	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
	}
	e := elemRef{page: p, node: n}
	c.stack = append(c.stack, e)

	// If we're on a leaf page/node then find the specific node.
	if e.isLeaf() {
		c.nsearch(key)
		return
	}

	if n != nil {
		c.searchNode(key, n)
		return
	}
	c.searchPage(key, p)
}

func (c *Cursor) searchNode(key []byte, n *node) {
	var exact bool
	index := sort.Search(len(n.inodes), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(n.inodes[i].key, key)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	if !exact && index > 0 {
		index--
	}
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, n.inodes[index].pgid)
}

func (c *Cursor) searchPage(key []byte, p *page) {
	// Binary search for the correct range.
	inodes := p.branchPageElements()

	var exact bool
	index := sort.Search(int(p.count), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(inodes[i].key(), key)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	if !exact && index > 0 {
		index--
	}
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, inodes[index].pgid)
}

// nsearch searches the leaf node on the top of the stack for a key.
func (c *Cursor) nsearch(key []byte) {
	e := &c.stack[len(c.stack)-1]
	p, n := e.page, e.node

	// If we have a node then search its inodes.
	if n != nil {
		index := sort.Search(len(n.inodes), func(i int) bool {
			return bytes.Compare(n.inodes[i].key, key) != -1
		})
		e.index = index
		return
	}

	// If we have a page then search its leaf elements.
	inodes := p.leafPageElements()
	index := sort.Search(int(p.count), func(i int) bool {
		return bytes.Compare(inodes[i].key(), key) != -1
	})
	e.index = index
}

// keyValue returns the key and value of the current leaf element.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
	ref := &c.stack[len(c.stack)-1]
	if ref.count() == 0 || ref.index >= ref.count() {
		return nil, nil, 0
	}

	// Retrieve value from node.
	if ref.node != nil {
		inode := &ref.node.inodes[ref.index]
		return inode.key, inode.value, inode.flags
	}

	// Or retrieve value from page.
	elem := ref.page.leafPageElement(uint16(ref.index))
	return elem.key(), elem.value(), elem.flags
}

// node returns the node that the cursor is currently positioned on.
func (c *Cursor) node() *node {
	_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")

	// If the top of the stack is a leaf node then just return it.
	if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
		return ref.node
	}

	// Start from root and traverse down the hierarchy.
	var n = c.stack[0].node
	if n == nil {
		n = c.bucket.node(c.stack[0].page.id, nil)
	}
	for _, ref := range c.stack[:len(c.stack)-1] {
		_assert(!n.isLeaf, "expected branch node")
		n = n.childAt(int(ref.index))
	}
	_assert(n.isLeaf, "expected leaf node")
	return n
}

// elemRef represents a reference to an element on a given page/node.
type elemRef struct {
	page  *page
	node  *node
	index int
}

// isLeaf returns whether the ref is pointing at a leaf page/node.
func (r *elemRef) isLeaf() bool {
	if r.node != nil {
		return r.node.isLeaf
	}
	return (r.page.flags & leafPageFlag) != 0
}

// count returns the number of inodes or page elements.
func (r *elemRef) count() int {
	if r.node != nil {
		return len(r.node.inodes)
	}
	return int(r.page.count)
}
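For orientation, this is how the Cursor API above is typically driven from application code. A hedged sketch (the file, bucket, and key names are invented): full iteration with First/Next, then a prefix scan with Seek, exactly the pattern crockery would need to walk stored emails:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	bolt "github.com/coreos/bbolt"
)

func main() {
	db, err := bolt.Open("cursor-demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("emails"))
		if err != nil {
			return err
		}
		for _, k := range []string{"inbox/1", "inbox/2", "sent/1"} {
			if err := b.Put([]byte(k), []byte("...")); err != nil {
				return err
			}
		}

		// Full scan: First positions the cursor, Next advances until nil.
		c := b.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s => %d bytes\n", k, len(v))
		}

		// Prefix scan: Seek jumps to the first key >= the prefix.
		prefix := []byte("inbox/")
		for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Next() {
			fmt.Printf("inbox key: %s\n", k)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```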
1036	vendor/github.com/coreos/bbolt/db.go (generated, vendored, new file)
File diff suppressed because it is too large.

44	vendor/github.com/coreos/bbolt/doc.go (generated, vendored, new file)
@@ -0,0 +1,44 @@
/*
Package bolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
Postgres or MySQL.

Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
optimized for fast read access and does not require recovery in the event of a
system crash. Transactions which have not finished committing will simply be
rolled back in the event of a crash.

The design of Bolt is based on Howard Chu's LMDB database project.

Bolt currently works on Windows, Mac OS X, and Linux.


Basics

There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is
a collection of unique keys that are associated with values.

Transactions provide either read-only or read-write access to the database.
Read-only transactions can retrieve key/value pairs and can use Cursors to
iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time.


Caveats

The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database, however, this means that keys and
values returned from Bolt cannot be changed. Writing to a read-only byte slice
will cause Go to panic.

Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic.
*/
package bolt
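The caveat about value lifetimes deserves a concrete illustration. A minimal sketch, assuming invented names, that copies a value out of the transaction before using it, for exactly the reason doc.go gives (the copy mirrors the unexported cloneBytes helper above):

```go
package main

import (
	"fmt"
	"log"

	bolt "github.com/coreos/bbolt"
)

// get returns a heap copy of the stored value so it remains valid after
// the transaction closes; the mmap-backed slice bolt hands out does not.
func get(db *bolt.DB, bucket, key []byte) ([]byte, error) {
	var out []byte
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)
		if b == nil {
			return bolt.ErrBucketNotFound
		}
		if v := b.Get(key); v != nil {
			out = make([]byte, len(v))
			copy(out, v) // same idea as cloneBytes in bucket.go
		}
		return nil
	})
	return out, err
}

func main() {
	db, err := bolt.Open("caveat-demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("config"))
		if err != nil {
			return err
		}
		return b.Put([]byte("domain"), []byte("example.com"))
	})
	if err != nil {
		log.Fatal(err)
	}

	v, err := get(db, []byte("config"), []byte("domain"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("domain = %s\n", v) // safe: v outlives the transaction
}
```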
71	vendor/github.com/coreos/bbolt/errors.go (generated, vendored, new file)
@@ -0,0 +1,71 @@
package bolt

import "errors"

// These errors can be returned when opening or calling methods on a DB.
var (
	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
	// is opened or after it is closed.
	ErrDatabaseNotOpen = errors.New("database not open")

	// ErrDatabaseOpen is returned when opening a database that is
	// already open.
	ErrDatabaseOpen = errors.New("database already open")

	// ErrInvalid is returned when both meta pages on a database are invalid.
	// This typically occurs when a file is not a bolt database.
	ErrInvalid = errors.New("invalid database")

	// ErrVersionMismatch is returned when the data file was created with a
	// different version of Bolt.
	ErrVersionMismatch = errors.New("version mismatch")

	// ErrChecksum is returned when either meta page checksum does not match.
	ErrChecksum = errors.New("checksum error")

	// ErrTimeout is returned when a database cannot obtain an exclusive lock
	// on the data file after the timeout passed to Open().
	ErrTimeout = errors.New("timeout")
)

// These errors can occur when beginning or committing a Tx.
var (
	// ErrTxNotWritable is returned when performing a write operation on a
	// read-only transaction.
	ErrTxNotWritable = errors.New("tx not writable")

	// ErrTxClosed is returned when committing or rolling back a transaction
	// that has already been committed or rolled back.
	ErrTxClosed = errors.New("tx closed")

	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
	// read-only database.
	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)

// These errors can occur when putting or deleting a value or a bucket.
var (
	// ErrBucketNotFound is returned when trying to access a bucket that has
	// not been created yet.
	ErrBucketNotFound = errors.New("bucket not found")

	// ErrBucketExists is returned when creating a bucket that already exists.
	ErrBucketExists = errors.New("bucket already exists")

	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
	ErrBucketNameRequired = errors.New("bucket name required")

	// ErrKeyRequired is returned when inserting a zero-length key.
	ErrKeyRequired = errors.New("key required")

	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
	ErrKeyTooLarge = errors.New("key too large")

	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
	ErrValueTooLarge = errors.New("value too large")

	// ErrIncompatibleValue is returned when trying to create or delete a bucket
	// on an existing non-bucket key or when trying to create or delete a
	// non-bucket key on an existing bucket key.
	ErrIncompatibleValue = errors.New("incompatible value")
)
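These sentinel errors are plain package-level values, so callers distinguish them with simple equality checks rather than type assertions. A small sketch (bucket name hypothetical) separating "bucket missing" from genuine failures:

```go
package main

import (
	"fmt"
	"log"

	bolt "github.com/coreos/bbolt"
)

func main() {
	db, err := bolt.Open("errors-demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		// Deleting a bucket that was never created yields a sentinel
		// value we can test for directly.
		err := tx.DeleteBucket([]byte("no-such-bucket"))
		if err == bolt.ErrBucketNotFound {
			fmt.Println("bucket missing, nothing to delete")
			return nil
		}
		return err // nil on success, or a real failure to propagate
	})
	if err != nil {
		log.Fatal(err)
	}
}
```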
248	vendor/github.com/coreos/bbolt/freelist.go (generated, vendored, new file)
@@ -0,0 +1,248 @@
package bolt

import (
	"fmt"
	"sort"
	"unsafe"
)

// freelist represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
type freelist struct {
	ids     []pgid          // all free and available free page ids.
	pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
	cache   map[pgid]bool   // fast lookup of all free and pending page ids.
}

// newFreelist returns an empty, initialized freelist.
func newFreelist() *freelist {
	return &freelist{
		pending: make(map[txid][]pgid),
		cache:   make(map[pgid]bool),
	}
}

// size returns the size of the page after serialization.
func (f *freelist) size() int {
	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
}

// count returns count of pages on the freelist
func (f *freelist) count() int {
	return f.free_count() + f.pending_count()
}

// free_count returns count of free pages
func (f *freelist) free_count() int {
	return len(f.ids)
}

// pending_count returns count of pending pages
func (f *freelist) pending_count() int {
	var count int
	for _, list := range f.pending {
		count += len(list)
	}
	return count
}

// all returns a list of all free ids and all pending ids in one sorted list.
func (f *freelist) all() []pgid {
	m := make(pgids, 0)

	for _, list := range f.pending {
		m = append(m, list...)
	}

	sort.Sort(m)
	return pgids(f.ids).merge(m)
}

// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
func (f *freelist) allocate(n int) pgid {
	if len(f.ids) == 0 {
		return 0
	}

	var initial, previd pgid
	for i, id := range f.ids {
		if id <= 1 {
			panic(fmt.Sprintf("invalid page allocation: %d", id))
		}

		// Reset initial page if this is not contiguous.
		if previd == 0 || id-previd != 1 {
			initial = id
		}

		// If we found a contiguous block then remove it and return it.
		if (id-initial)+1 == pgid(n) {
			// If we're allocating off the beginning then take the fast path
			// and just adjust the existing slice. This will use extra memory
			// temporarily but the append() in free() will realloc the slice
			// as is necessary.
			if (i + 1) == n {
				f.ids = f.ids[i+1:]
			} else {
				copy(f.ids[i-n+1:], f.ids[i+1:])
				f.ids = f.ids[:len(f.ids)-n]
			}

			// Remove from the free cache.
			for i := pgid(0); i < pgid(n); i++ {
				delete(f.cache, initial+i)
			}

			return initial
		}

		previd = id
	}
	return 0
}

// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
func (f *freelist) free(txid txid, p *page) {
	if p.id <= 1 {
		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
	}

	// Free page and all its overflow pages.
	var ids = f.pending[txid]
	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
		// Verify that page is not already free.
		if f.cache[id] {
			panic(fmt.Sprintf("page %d already freed", id))
		}

		// Add to the freelist and cache.
		ids = append(ids, id)
		f.cache[id] = true
	}
	f.pending[txid] = ids
}

// release moves all page ids for a transaction id (or older) to the freelist.
func (f *freelist) release(txid txid) {
	m := make(pgids, 0)
	for tid, ids := range f.pending {
		if tid <= txid {
			// Move transaction's pending pages to the available freelist.
			// Don't remove from the cache since the page is still free.
			m = append(m, ids...)
			delete(f.pending, tid)
		}
	}
	sort.Sort(m)
	f.ids = pgids(f.ids).merge(m)
}

// rollback removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
	// Remove page ids from cache.
	for _, id := range f.pending[txid] {
		delete(f.cache, id)
	}

	// Remove pages from pending list.
	delete(f.pending, txid)
}

// freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
	return f.cache[pgid]
}

// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
	// If the page.count is at the max uint16 value (64k) then it's considered
	// an overflow and the size of the freelist is stored as the first element.
	idx, count := 0, int(p.count)
	if count == 0xFFFF {
		idx = 1
		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
	}

	// Copy the list of page ids from the freelist.
	if count == 0 {
		f.ids = nil
	} else {
		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
		f.ids = make([]pgid, len(ids))
		copy(f.ids, ids)

		// Make sure they're sorted.
		sort.Sort(pgids(f.ids))
	}

	// Rebuild the page cache.
	f.reindex()
}

// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
	// Combine the old free pgids and pgids waiting on an open transaction.
	ids := f.all()

	// Update the header flag.
	p.flags |= freelistPageFlag

	// The page.count can only hold up to 64k elements so if we overflow that
	// number then we handle it by putting the size in the first element.
	if len(ids) == 0 {
		p.count = uint16(len(ids))
	} else if len(ids) < 0xFFFF {
		p.count = uint16(len(ids))
		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
	} else {
		p.count = 0xFFFF
		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
	}

	return nil
}

// reload reads the freelist from a page and filters out pending items.
func (f *freelist) reload(p *page) {
	f.read(p)

	// Build a cache of only pending pages.
	pcache := make(map[pgid]bool)
	for _, pendingIDs := range f.pending {
		for _, pendingID := range pendingIDs {
			pcache[pendingID] = true
		}
	}

	// Check each page in the freelist and build a new available freelist
	// with any pages not in the pending lists.
	var a []pgid
	for _, id := range f.ids {
		if !pcache[id] {
			a = append(a, id)
		}
	}
	f.ids = a

	// Once the available list is rebuilt then rebuild the free cache so that
	// it includes the available and pending free pages.
	f.reindex()
}

// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
	f.cache = make(map[pgid]bool)
	for _, id := range f.ids {
		f.cache[id] = true
	}
	for _, pendingIDs := range f.pending {
		for _, pendingID := range pendingIDs {
			f.cache[pendingID] = true
		}
	}
}
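The freelist is internal to the package, but the contiguous-run scan in allocate is easy to check in isolation. A standalone sketch of the same idea over a sorted slice of page ids (plain uint64 here, not the package's pgid type; the real method also trims f.ids and updates f.cache):

```go
package main

import "fmt"

// allocateRun mimics freelist.allocate's scan: walk a sorted slice of
// free page ids looking for n contiguous ids and return the first id
// of the run, or 0 if no such run exists.
func allocateRun(ids []uint64, n int) uint64 {
	var initial, previd uint64
	for _, id := range ids {
		// Restart the run whenever ids stop being contiguous.
		if previd == 0 || id-previd != 1 {
			initial = id
		}
		// A run of exactly n pages ends at id: return its first id.
		if int(id-initial)+1 == n {
			return initial
		}
		previd = id
	}
	return 0
}

func main() {
	free := []uint64{3, 4, 7, 8, 9, 12}
	fmt.Println(allocateRun(free, 3)) // 7: the run 7,8,9
	fmt.Println(allocateRun(free, 4)) // 0: no run of four pages
}
```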
604	vendor/github.com/coreos/bbolt/node.go (generated, vendored, new file)
@@ -0,0 +1,604 @@
package bolt

import (
	"bytes"
	"fmt"
	"sort"
	"unsafe"
)

// node represents an in-memory, deserialized page.
type node struct {
	bucket     *Bucket
	isLeaf     bool
	unbalanced bool
	spilled    bool
	key        []byte
	pgid       pgid
	parent     *node
	children   nodes
	inodes     inodes
}

// root returns the top-level node this node is attached to.
func (n *node) root() *node {
	if n.parent == nil {
		return n
	}
	return n.parent.root()
}

// minKeys returns the minimum number of inodes this node should have.
func (n *node) minKeys() int {
	if n.isLeaf {
		return 1
	}
	return 2
}

// size returns the size of the node after serialization.
func (n *node) size() int {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + len(item.key) + len(item.value)
	}
	return sz
}

// sizeLessThan returns true if the node is less than a given size.
// This is an optimization to avoid calculating a large node when we only need
// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v int) bool {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + len(item.key) + len(item.value)
		if sz >= v {
			return false
		}
	}
	return true
}

// pageElementSize returns the size of each page element based on the type of node.
func (n *node) pageElementSize() int {
	if n.isLeaf {
		return leafPageElementSize
	}
	return branchPageElementSize
}

// childAt returns the child node at a given index.
func (n *node) childAt(index int) *node {
	if n.isLeaf {
		panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
	}
	return n.bucket.node(n.inodes[index].pgid, n)
}

// childIndex returns the index of a given child node.
func (n *node) childIndex(child *node) int {
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
	return index
}

// numChildren returns the number of children.
func (n *node) numChildren() int {
	return len(n.inodes)
}

// nextSibling returns the next node with the same parent.
func (n *node) nextSibling() *node {
	if n.parent == nil {
		return nil
	}
	index := n.parent.childIndex(n)
	if index >= n.parent.numChildren()-1 {
		return nil
	}
	return n.parent.childAt(index + 1)
}

// prevSibling returns the previous node with the same parent.
func (n *node) prevSibling() *node {
	if n.parent == nil {
		return nil
	}
	index := n.parent.childIndex(n)
	if index == 0 {
		return nil
	}
	return n.parent.childAt(index - 1)
}

// put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
	if pgid >= n.bucket.tx.meta.pgid {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
	} else if len(oldKey) <= 0 {
		panic("put: zero-length old key")
	} else if len(newKey) <= 0 {
		panic("put: zero-length new key")
	}

	// Find insertion index.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })

	// Add capacity and shift nodes if we don't have an exact match and need to insert.
	exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
	if !exact {
		n.inodes = append(n.inodes, inode{})
		copy(n.inodes[index+1:], n.inodes[index:])
	}

	inode := &n.inodes[index]
	inode.flags = flags
	inode.key = newKey
	inode.value = value
	inode.pgid = pgid
	_assert(len(inode.key) > 0, "put: zero-length inode key")
}

// del removes a key from the node.
func (n *node) del(key []byte) {
	// Find index of key.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })

	// Exit if the key isn't found.
	if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
		return
	}

	// Delete inode from the node.
	n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)

	// Mark the node as needing rebalancing.
	n.unbalanced = true
}

// read initializes the node from a page.
func (n *node) read(p *page) {
	n.pgid = p.id
	n.isLeaf = ((p.flags & leafPageFlag) != 0)
	n.inodes = make(inodes, int(p.count))

	for i := 0; i < int(p.count); i++ {
		inode := &n.inodes[i]
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			inode.flags = elem.flags
			inode.key = elem.key()
			inode.value = elem.value()
		} else {
			elem := p.branchPageElement(uint16(i))
			inode.pgid = elem.pgid
			inode.key = elem.key()
		}
		_assert(len(inode.key) > 0, "read: zero-length inode key")
	}

	// Save first key so we can find the node in the parent when we spill.
	if len(n.inodes) > 0 {
		n.key = n.inodes[0].key
		_assert(len(n.key) > 0, "read: zero-length node key")
	} else {
		n.key = nil
	}
}

// write writes the items onto one or more pages.
func (n *node) write(p *page) {
	// Initialize page.
	if n.isLeaf {
		p.flags |= leafPageFlag
	} else {
		p.flags |= branchPageFlag
	}

	if len(n.inodes) >= 0xFFFF {
		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
	}
	p.count = uint16(len(n.inodes))

	// Stop here if there are no items to write.
	if p.count == 0 {
		return
	}

	// Loop over each item and write it to the page.
	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
	for i, item := range n.inodes {
		_assert(len(item.key) > 0, "write: zero-length inode key")

		// Write the page element.
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.flags = item.flags
			elem.ksize = uint32(len(item.key))
			elem.vsize = uint32(len(item.value))
		} else {
			elem := p.branchPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.ksize = uint32(len(item.key))
			elem.pgid = item.pgid
			_assert(elem.pgid != p.id, "write: circular dependency occurred")
		}

		// If the length of key+value is larger than the max allocation size
		// then we need to reallocate the byte array pointer.
		//
		// See: https://github.com/boltdb/bolt/pull/335
		klen, vlen := len(item.key), len(item.value)
		if len(b) < klen+vlen {
			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
		}

		// Write data for the element to the end of the page.
		copy(b[0:], item.key)
		b = b[klen:]
		copy(b[0:], item.value)
		b = b[vlen:]
	}

	// DEBUG ONLY: n.dump()
}

// split breaks up a node into multiple smaller nodes, if appropriate.
// This should only be called from the spill() function.
func (n *node) split(pageSize int) []*node {
	var nodes []*node

	node := n
	for {
		// Split node into two.
		a, b := node.splitTwo(pageSize)
		nodes = append(nodes, a)

		// If we can't split then exit the loop.
		if b == nil {
			break
		}

		// Set node to b so it gets split on the next iteration.
		node = b
	}

	return nodes
}

// splitTwo breaks up a node into two smaller nodes, if appropriate.
// This should only be called from the split() function.
func (n *node) splitTwo(pageSize int) (*node, *node) {
	// Ignore the split if the page doesn't have at least enough nodes for
	// two pages or if the nodes can fit in a single page.
	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
		return n, nil
	}

	// Determine the threshold before starting a new node.
	var fillPercent = n.bucket.FillPercent
	if fillPercent < minFillPercent {
		fillPercent = minFillPercent
	} else if fillPercent > maxFillPercent {
		fillPercent = maxFillPercent
	}
	threshold := int(float64(pageSize) * fillPercent)

	// Determine split position and sizes of the two pages.
	splitIndex, _ := n.splitIndex(threshold)

	// Split node into two separate nodes.
	// If there's no parent then we'll need to create one.
	if n.parent == nil {
		n.parent = &node{bucket: n.bucket, children: []*node{n}}
	}

	// Create a new node and add it to the parent.
	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
	n.parent.children = append(n.parent.children, next)

	// Split inodes across two nodes.
	next.inodes = n.inodes[splitIndex:]
	n.inodes = n.inodes[:splitIndex]

	// Update the statistics.
	n.bucket.tx.stats.Split++

	return n, next
}

// splitIndex finds the position where a page will fill a given threshold.
// It returns the index as well as the size of the first page.
// This is only called from split().
func (n *node) splitIndex(threshold int) (index, sz int) {
	sz = pageHeaderSize

	// Loop until we only have the minimum number of keys required for the second page.
	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
		index = i
		inode := n.inodes[i]
		elsize := n.pageElementSize() + len(inode.key) + len(inode.value)

		// If we have at least the minimum number of keys and adding another
		// node would put us over the threshold then exit and return.
		if i >= minKeysPerPage && sz+elsize > threshold {
			break
		}

		// Add the element size to the total size.
		sz += elsize
	}

	return
}

// spill writes the nodes to dirty pages and splits nodes as it goes.
// Returns an error if dirty pages cannot be allocated.
func (n *node) spill() error {
	var tx = n.bucket.tx
	if n.spilled {
		return nil
	}

	// Spill child nodes first. Child nodes can materialize sibling nodes in
	// the case of split-merge so we cannot use a range loop. We have to check
	// the children size on every loop iteration.
	sort.Sort(n.children)
	for i := 0; i < len(n.children); i++ {
		if err := n.children[i].spill(); err != nil {
			return err
		}
	}

	// We no longer need the child list because it's only used for spill tracking.
	n.children = nil

	// Split nodes into appropriate sizes. The first node will always be n.
	var nodes = n.split(tx.db.pageSize)
	for _, node := range nodes {
		// Add node's page to the freelist if it's not new.
		if node.pgid > 0 {
			tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
			node.pgid = 0
		}

		// Allocate contiguous space for the node.
		p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
		if err != nil {
			return err
		}

		// Write the node.
		if p.id >= tx.meta.pgid {
			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
		}
		node.pgid = p.id
		node.write(p)
		node.spilled = true

		// Insert into parent inodes.
		if node.parent != nil {
			var key = node.key
			if key == nil {
				key = node.inodes[0].key
			}

			node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
			node.key = node.inodes[0].key
			_assert(len(node.key) > 0, "spill: zero-length node key")
		}

		// Update the statistics.
		tx.stats.Spill++
	}

	// If the root node split and created a new root then we need to spill that
	// as well. We'll clear out the children to make sure it doesn't try to respill.
	if n.parent != nil && n.parent.pgid == 0 {
		n.children = nil
		return n.parent.spill()
	}

	return nil
}

// rebalance attempts to combine the node with sibling nodes if the node fill
// size is below a threshold or if there are not enough keys.
func (n *node) rebalance() {
	if !n.unbalanced {
		return
	}
	n.unbalanced = false

	// Update statistics.
	n.bucket.tx.stats.Rebalance++

	// Ignore if node is above threshold (25%) and has enough keys.
	var threshold = n.bucket.tx.db.pageSize / 4
	if n.size() > threshold && len(n.inodes) > n.minKeys() {
		return
	}

	// Root node has special handling.
	if n.parent == nil {
		// If root node is a branch and only has one node then collapse it.
		if !n.isLeaf && len(n.inodes) == 1 {
			// Move root's child up.
			child := n.bucket.node(n.inodes[0].pgid, n)
			n.isLeaf = child.isLeaf
			n.inodes = child.inodes[:]
			n.children = child.children

			// Reparent all child nodes being moved.
			for _, inode := range n.inodes {
				if child, ok := n.bucket.nodes[inode.pgid]; ok {
					child.parent = n
				}
			}

			// Remove old child.
			child.parent = nil
			delete(n.bucket.nodes, child.pgid)
			child.free()
		}

		return
	}

	// If node has no keys then just remove it.
	if n.numChildren() == 0 {
		n.parent.del(n.key)
		n.parent.removeChild(n)
		delete(n.bucket.nodes, n.pgid)
		n.free()
		n.parent.rebalance()
		return
	}

	_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")

	// Destination node is right sibling if idx == 0, otherwise left sibling.
	var target *node
	var useNextSibling = (n.parent.childIndex(n) == 0)
	if useNextSibling {
		target = n.nextSibling()
	} else {
		target = n.prevSibling()
	}

	// If both this node and the target node are too small then merge them.
	if useNextSibling {
		// Reparent all child nodes being moved.
		for _, inode := range target.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent.removeChild(child)
				child.parent = n
				child.parent.children = append(child.parent.children, child)
			}
		}

		// Copy over inodes from target and remove target.
		n.inodes = append(n.inodes, target.inodes...)
		n.parent.del(target.key)
		n.parent.removeChild(target)
		delete(n.bucket.nodes, target.pgid)
		target.free()
	} else {
		// Reparent all child nodes being moved.
		for _, inode := range n.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent.removeChild(child)
				child.parent = target
				child.parent.children = append(child.parent.children, child)
			}
		}

		// Copy over inodes to target and remove node.
		target.inodes = append(target.inodes, n.inodes...)
		n.parent.del(n.key)
		n.parent.removeChild(n)
		delete(n.bucket.nodes, n.pgid)
		n.free()
	}

	// Either this node or the target node was deleted from the parent so rebalance it.
	n.parent.rebalance()
}

// removes a node from the list of in-memory children.
// This does not affect the inodes.
func (n *node) removeChild(target *node) {
	for i, child := range n.children {
		if child == target {
			n.children = append(n.children[:i], n.children[i+1:]...)
			return
		}
	}
}

// dereference causes the node to copy all its inode key/value references to heap memory.
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
func (n *node) dereference() {
	if n.key != nil {
		key := make([]byte, len(n.key))
		copy(key, n.key)
		n.key = key
		_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
	}

	for i := range n.inodes {
		inode := &n.inodes[i]

		key := make([]byte, len(inode.key))
		copy(key, inode.key)
		inode.key = key
		_assert(len(inode.key) > 0, "dereference: zero-length inode key")

		value := make([]byte, len(inode.value))
		copy(value, inode.value)
		inode.value = value
	}

	// Recursively dereference children.
	for _, child := range n.children {
		child.dereference()
	}

	// Update statistics.
	n.bucket.tx.stats.NodeDeref++
}

// free adds the node's underlying page to the freelist.
func (n *node) free() {
	if n.pgid != 0 {
		n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
		n.pgid = 0
	}
}

// dump writes the contents of the node to STDERR for debugging purposes.
/*
func (n *node) dump() {
	// Write node header.
	var typ = "branch"
	if n.isLeaf {
		typ = "leaf"
	}
	warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))

	// Write out abbreviated version of each item.
	for _, item := range n.inodes {
		if n.isLeaf {
			if item.flags&bucketLeafFlag != 0 {
				bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
				warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
			} else {
				warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
			}
		} else {
			warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
		}
	}
	warn("")
}
*/

type nodes []*node

func (s nodes) Len() int           { return len(s) }
func (s nodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }

// inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type inode struct {
	flags uint32
	pgid  pgid
	key   []byte
	value []byte
}

type inodes []inode
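The fill-percent clamp and threshold computation in splitTwo are simple enough to verify by hand. A standalone sketch of the same arithmetic; the constant values 0.1 and 1.0 match what upstream bolt defines for minFillPercent and maxFillPercent (an assumption here, since those constants live in bucket.go, which is outside this excerpt):

```go
package main

import "fmt"

// Assumed to match the package constants in bucket.go.
const (
	minFillPercent = 0.1
	maxFillPercent = 1.0
)

// splitThreshold mirrors splitTwo's clamp-and-scale step: the bucket's
// FillPercent is clamped to [minFillPercent, maxFillPercent] and scaled
// by the page size to get the split threshold in bytes.
func splitThreshold(pageSize int, fillPercent float64) int {
	if fillPercent < minFillPercent {
		fillPercent = minFillPercent
	} else if fillPercent > maxFillPercent {
		fillPercent = maxFillPercent
	}
	return int(float64(pageSize) * fillPercent)
}

func main() {
	// With 4 KiB pages and a 0.5 fill percent, nodes split once they
	// would serialize past roughly 2 KiB.
	fmt.Println(splitThreshold(4096, 0.5)) // 2048
	fmt.Println(splitThreshold(4096, 5.0)) // 4096: clamped to 1.0
}
```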
178	vendor/github.com/coreos/bbolt/page.go (generated, vendored, new file)
@@ -0,0 +1,178 @@
package bolt

import (
	"fmt"
	"os"
	"sort"
	"unsafe"
)

const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))

const minKeysPerPage = 2

const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))

const (
	branchPageFlag   = 0x01
	leafPageFlag     = 0x02
	metaPageFlag     = 0x04
	freelistPageFlag = 0x10
)

const (
	bucketLeafFlag = 0x01
)

type pgid uint64

type page struct {
	id       pgid
	flags    uint16
	count    uint16
	overflow uint32
	ptr      uintptr
}

// typ returns a human readable page type string used for debugging.
func (p *page) typ() string {
	if (p.flags & branchPageFlag) != 0 {
		return "branch"
	} else if (p.flags & leafPageFlag) != 0 {
		return "leaf"
	} else if (p.flags & metaPageFlag) != 0 {
		return "meta"
	} else if (p.flags & freelistPageFlag) != 0 {
		return "freelist"
	}
	return fmt.Sprintf("unknown<%02x>", p.flags)
}

// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
	return (*meta)(unsafe.Pointer(&p.ptr))
}

// leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
	n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
	return n
}

// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
	if p.count == 0 {
		return nil
	}
	return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}

// branchPageElement retrieves the branch node by index
func (p *page) branchPageElement(index uint16) *branchPageElement {
	return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
}

// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
	if p.count == 0 {
		return nil
	}
	return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}

// dump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
	fmt.Fprintf(os.Stderr, "%x\n", buf)
}

type pages []*page

func (s pages) Len() int           { return len(s) }
func (s pages) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }

// branchPageElement represents a node on a branch page.
type branchPageElement struct {
	pos   uint32
	ksize uint32
	pgid  pgid
}

// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
}

// leafPageElement represents a node on a leaf page.
type leafPageElement struct {
	flags uint32
	pos   uint32
	ksize uint32
	vsize uint32
}

// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
}

// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
}

// PageInfo represents human readable information about a page.
type PageInfo struct {
	ID            int
	Type          string
	Count         int
	OverflowCount int
}

type pgids []pgid

func (s pgids) Len() int           { return len(s) }
func (s pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s pgids) Less(i, j int) bool { return s[i] < s[j] }

// merge returns the sorted union of a and b.
func (a pgids) merge(b pgids) pgids {
	// Return the opposite slice if one is nil.
	if len(a) == 0 {
		return b
	} else if len(b) == 0 {
		return a
	}

	// Create a list to hold all elements from both lists.
	merged := make(pgids, 0, len(a)+len(b))

	// Assign lead to the slice with a lower starting value, follow to the higher value.
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}

	// Continue while there are elements in the lead.
	for len(lead) > 0 {
		// Merge largest prefix of lead that is ahead of follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}

		// Swap lead and follow.
		lead, follow = follow, lead[n:]
	}

	// Append what's left in follow.
	merged = append(merged, follow...)

	return merged
}
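The merge at the end of page.go is a plain sorted-union of two id lists; the lead/follow swap keeps the work proportional to the number of "runs" plus a binary search per run. A standalone sketch of the same algorithm over []uint64 (not the package's pgids type):

```go
package main

import (
	"fmt"
	"sort"
)

// merge returns the sorted union of two sorted slices, following the
// lead/follow structure of pgids.merge above.
func merge(a, b []uint64) []uint64 {
	if len(a) == 0 {
		return b
	}
	if len(b) == 0 {
		return a
	}
	merged := make([]uint64, 0, len(a)+len(b))

	// lead starts as whichever slice has the smaller first element.
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}
	for len(lead) > 0 {
		// Copy the longest prefix of lead whose elements are <= follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}
		// Swap roles: follow now has the smaller head element.
		lead, follow = follow, lead[n:]
	}
	return append(merged, follow...)
}

func main() {
	fmt.Println(merge([]uint64{1, 4, 9}, []uint64{2, 3, 10}))
	// Output: [1 2 3 4 9 10]
}
```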
682	vendor/github.com/coreos/bbolt/tx.go (generated, vendored, new file)
@@ -0,0 +1,682 @@
package bolt

import (
	"fmt"
	"io"
	"os"
	"sort"
	"strings"
	"time"
	"unsafe"
)

// txid represents the internal transaction identifier.
type txid uint64

// Tx represents a read-only or read/write transaction on the database.
// Read-only transactions can be used for retrieving values for keys and creating cursors.
// Read/write transactions can create and remove buckets and create and remove keys.
//
// IMPORTANT: You must commit or rollback transactions when you are done with
// them. Pages can not be reclaimed by the writer until no more transactions
// are using them. A long running read transaction can cause the database to
// quickly grow.
type Tx struct {
	writable       bool
	managed        bool
	db             *DB
	meta           *meta
	root           Bucket
	pages          map[pgid]*page
	stats          TxStats
	commitHandlers []func()

	// WriteFlag specifies the flag for write-related methods like WriteTo().
	// Tx opens the database file with the specified flag to copy the data.
	//
	// By default, the flag is unset, which works well for mostly in-memory
	// workloads. For databases that are much larger than available RAM,
	// set the flag to syscall.O_DIRECT to avoid trashing the page cache.
	WriteFlag int
}

// init initializes the transaction.
func (tx *Tx) init(db *DB) {
	tx.db = db
	tx.pages = nil

	// Copy the meta page since it can be changed by the writer.
	tx.meta = &meta{}
	db.meta().copy(tx.meta)

	// Copy over the root bucket.
	tx.root = newBucket(tx)
	tx.root.bucket = &bucket{}
	*tx.root.bucket = tx.meta.root

	// Increment the transaction id and add a page cache for writable transactions.
	if tx.writable {
		tx.pages = make(map[pgid]*page)
		tx.meta.txid += txid(1)
	}
}

// ID returns the transaction id.
func (tx *Tx) ID() int {
	return int(tx.meta.txid)
}

// DB returns a reference to the database that created the transaction.
func (tx *Tx) DB() *DB {
	return tx.db
}

// Size returns current database size in bytes as seen by this transaction.
func (tx *Tx) Size() int64 {
	return int64(tx.meta.pgid) * int64(tx.db.pageSize)
}

// Writable returns whether the transaction can perform write operations.
func (tx *Tx) Writable() bool {
	return tx.writable
}

// Cursor creates a cursor associated with the root bucket.
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (tx *Tx) Cursor() *Cursor {
	return tx.root.Cursor()
}

// Stats retrieves a copy of the current transaction statistics.
func (tx *Tx) Stats() TxStats {
	return tx.stats
}

// Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) Bucket(name []byte) *Bucket {
	return tx.root.Bucket(name)
}

// CreateBucket creates a new bucket.
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
	return tx.root.CreateBucket(name)
}

// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
	return tx.root.CreateBucketIfNotExists(name)
}

// DeleteBucket deletes a bucket.
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
func (tx *Tx) DeleteBucket(name []byte) error {
	return tx.root.DeleteBucket(name)
}

// ForEach executes a function for each bucket in the root.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
	return tx.root.ForEach(func(k, v []byte) error {
		if err := fn(k, tx.root.Bucket(k)); err != nil {
			return err
		}
		return nil
	})
}

// OnCommit adds a handler function to be executed after the transaction successfully commits.
func (tx *Tx) OnCommit(fn func()) {
	tx.commitHandlers = append(tx.commitHandlers, fn)
}

// Commit writes all changes to disk and updates the meta page.
// Returns an error if a disk write error occurs, or if Commit is
// called on a read-only transaction.
func (tx *Tx) Commit() error {
	_assert(!tx.managed, "managed tx commit not allowed")
	if tx.db == nil {
		return ErrTxClosed
	} else if !tx.writable {
		return ErrTxNotWritable
	}

	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.

	// Rebalance nodes which have had deletions.
	var startTime = time.Now()
	tx.root.rebalance()
	if tx.stats.Rebalance > 0 {
		tx.stats.RebalanceTime += time.Since(startTime)
	}

	// spill data onto dirty pages.
	startTime = time.Now()
	if err := tx.root.spill(); err != nil {
		tx.rollback()
		return err
	}
	tx.stats.SpillTime += time.Since(startTime)

	// Free the old root bucket.
	tx.meta.root.root = tx.root.root

	opgid := tx.meta.pgid

	// Free the freelist and allocate new pages for it. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id

	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
			return err
		}
	}

	// Write dirty pages to disk.
	startTime = time.Now()
	if err := tx.write(); err != nil {
		tx.rollback()
		return err
	}

	// If strict mode is enabled then perform a consistency check.
	// Only the first consistency error is reported in the panic.
	if tx.db.StrictMode {
		ch := tx.Check()
		var errs []string
		for {
			err, ok := <-ch
			if !ok {
				break
			}
			errs = append(errs, err.Error())
		}
		if len(errs) > 0 {
			panic("check fail: " + strings.Join(errs, "\n"))
		}
	}

	// Write meta to disk.
	if err := tx.writeMeta(); err != nil {
		tx.rollback()
		return err
	}
	tx.stats.WriteTime += time.Since(startTime)

	// Finalize the transaction.
	tx.close()

	// Execute commit handlers now that the locks have been removed.
	for _, fn := range tx.commitHandlers {
		fn()
	}

	return nil
}

// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
	_assert(!tx.managed, "managed tx rollback not allowed")
	if tx.db == nil {
		return ErrTxClosed
	}
	tx.rollback()
	return nil
}

func (tx *Tx) rollback() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		tx.db.freelist.rollback(tx.meta.txid)
		tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
|
||||
}
|
||||
tx.close()
|
||||
}
|
||||
|
||||
func (tx *Tx) close() {
|
||||
if tx.db == nil {
|
||||
return
|
||||
}
|
||||
if tx.writable {
|
||||
// Grab freelist stats.
|
||||
var freelistFreeN = tx.db.freelist.free_count()
|
||||
var freelistPendingN = tx.db.freelist.pending_count()
|
||||
var freelistAlloc = tx.db.freelist.size()
|
||||
|
||||
// Remove transaction ref & writer lock.
|
||||
tx.db.rwtx = nil
|
||||
tx.db.rwlock.Unlock()
|
||||
|
||||
// Merge statistics.
|
||||
tx.db.statlock.Lock()
|
||||
tx.db.stats.FreePageN = freelistFreeN
|
||||
tx.db.stats.PendingPageN = freelistPendingN
|
||||
tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
|
||||
tx.db.stats.FreelistInuse = freelistAlloc
|
||||
tx.db.stats.TxStats.add(&tx.stats)
|
||||
tx.db.statlock.Unlock()
|
||||
} else {
|
||||
tx.db.removeTx(tx)
|
||||
}
|
||||
|
||||
// Clear all references.
|
||||
tx.db = nil
|
||||
tx.meta = nil
|
||||
tx.root = Bucket{tx: tx}
|
||||
tx.pages = nil
|
||||
}
|
||||
|
||||
// Copy writes the entire database to a writer.
|
||||
// This function exists for backwards compatibility. Use WriteTo() instead.
|
||||
func (tx *Tx) Copy(w io.Writer) error {
|
||||
_, err := tx.WriteTo(w)
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteTo writes the entire database to a writer.
|
||||
// If err == nil then exactly tx.Size() bytes will be written into the writer.
|
||||
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
|
||||
// Attempt to open reader with WriteFlag
|
||||
f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
// Generate a meta page. We use the same page data for both meta pages.
|
||||
buf := make([]byte, tx.db.pageSize)
|
||||
page := (*page)(unsafe.Pointer(&buf[0]))
|
||||
page.flags = metaPageFlag
|
||||
*page.meta() = *tx.meta
|
||||
|
||||
// Write meta 0.
|
||||
page.id = 0
|
||||
page.meta().checksum = page.meta().sum64()
|
||||
nn, err := w.Write(buf)
|
||||
n += int64(nn)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("meta 0 copy: %s", err)
|
||||
}
|
||||
|
||||
// Write meta 1 with a lower transaction id.
|
||||
page.id = 1
|
||||
page.meta().txid -= 1
|
||||
page.meta().checksum = page.meta().sum64()
|
||||
nn, err = w.Write(buf)
|
||||
n += int64(nn)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("meta 1 copy: %s", err)
|
||||
}
|
||||
|
||||
// Move past the meta pages in the file.
|
||||
if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
|
||||
return n, fmt.Errorf("seek: %s", err)
|
||||
}
|
||||
|
||||
// Copy data pages.
|
||||
wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
|
||||
n += wn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
return n, f.Close()
|
||||
}
|
||||
|
||||
// CopyFile copies the entire database to file at the given path.
|
||||
// A reader transaction is maintained during the copy so it is safe to continue
|
||||
// using the database while a copy is in progress.
|
||||
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
|
||||
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = tx.Copy(f)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
// Check performs several consistency checks on the database for this transaction.
|
||||
// An error is returned if any inconsistency is found.
|
||||
//
|
||||
// It can be safely run concurrently on a writable transaction. However, this
|
||||
// incurs a high cost for large databases and databases with a lot of subbuckets
|
||||
// because of caching. This overhead can be removed if running on a read-only
|
||||
// transaction, however, it is not safe to execute other writer transactions at
|
||||
// the same time.
|
||||
func (tx *Tx) Check() <-chan error {
|
||||
ch := make(chan error)
|
||||
go tx.check(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (tx *Tx) check(ch chan error) {
|
||||
// Check if any pages are double freed.
|
||||
freed := make(map[pgid]bool)
|
||||
for _, id := range tx.db.freelist.all() {
|
||||
if freed[id] {
|
||||
ch <- fmt.Errorf("page %d: already freed", id)
|
||||
}
|
||||
freed[id] = true
|
||||
}
|
||||
|
||||
// Track every reachable page.
|
||||
reachable := make(map[pgid]*page)
|
||||
reachable[0] = tx.page(0) // meta0
|
||||
reachable[1] = tx.page(1) // meta1
|
||||
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
|
||||
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
|
||||
}
|
||||
|
||||
// Recursively check buckets.
|
||||
tx.checkBucket(&tx.root, reachable, freed, ch)
|
||||
|
||||
// Ensure all pages below high water mark are either reachable or freed.
|
||||
for i := pgid(0); i < tx.meta.pgid; i++ {
|
||||
_, isReachable := reachable[i]
|
||||
if !isReachable && !freed[i] {
|
||||
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
|
||||
}
|
||||
}
|
||||
|
||||
// Close the channel to signal completion.
|
||||
close(ch)
|
||||
}
|
||||
|
||||
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
|
||||
// Ignore inline buckets.
|
||||
if b.root == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Check every page used by this bucket.
|
||||
b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
|
||||
if p.id > tx.meta.pgid {
|
||||
ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
|
||||
}
|
||||
|
||||
// Ensure each page is only referenced once.
|
||||
for i := pgid(0); i <= pgid(p.overflow); i++ {
|
||||
var id = p.id + i
|
||||
if _, ok := reachable[id]; ok {
|
||||
ch <- fmt.Errorf("page %d: multiple references", int(id))
|
||||
}
|
||||
reachable[id] = p
|
||||
}
|
||||
|
||||
// We should only encounter un-freed leaf and branch pages.
|
||||
if freed[p.id] {
|
||||
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
|
||||
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
|
||||
ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
|
||||
}
|
||||
})
|
||||
|
||||
// Check each bucket within this bucket.
|
||||
_ = b.ForEach(func(k, v []byte) error {
|
||||
if child := b.Bucket(k); child != nil {
|
||||
tx.checkBucket(child, reachable, freed, ch)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// allocate returns a contiguous block of memory starting at a given page.
|
||||
func (tx *Tx) allocate(count int) (*page, error) {
|
||||
p, err := tx.db.allocate(count)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Save to our page cache.
|
||||
tx.pages[p.id] = p
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.PageCount++
|
||||
tx.stats.PageAlloc += count * tx.db.pageSize
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// write writes any dirty pages to disk.
|
||||
func (tx *Tx) write() error {
|
||||
// Sort pages by id.
|
||||
pages := make(pages, 0, len(tx.pages))
|
||||
for _, p := range tx.pages {
|
||||
pages = append(pages, p)
|
||||
}
|
||||
// Clear out page cache early.
|
||||
tx.pages = make(map[pgid]*page)
|
||||
sort.Sort(pages)
|
||||
|
||||
// Write pages to disk in order.
|
||||
for _, p := range pages {
|
||||
size := (int(p.overflow) + 1) * tx.db.pageSize
|
||||
offset := int64(p.id) * int64(tx.db.pageSize)
|
||||
|
||||
// Write out page in "max allocation" sized chunks.
|
||||
ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
|
||||
for {
|
||||
// Limit our write to our max allocation size.
|
||||
sz := size
|
||||
if sz > maxAllocSize-1 {
|
||||
sz = maxAllocSize - 1
|
||||
}
|
||||
|
||||
// Write chunk to disk.
|
||||
buf := ptr[:sz]
|
||||
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.Write++
|
||||
|
||||
// Exit inner for loop if we've written all the chunks.
|
||||
size -= sz
|
||||
if size == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Otherwise move offset forward and move pointer to next chunk.
|
||||
offset += int64(sz)
|
||||
ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
|
||||
}
|
||||
}
|
||||
|
||||
// Ignore file sync if flag is set on DB.
|
||||
if !tx.db.NoSync || IgnoreNoSync {
|
||||
if err := fdatasync(tx.db); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Put small pages back to page pool.
|
||||
for _, p := range pages {
|
||||
// Ignore page sizes over 1 page.
|
||||
// These are allocated using make() instead of the page pool.
|
||||
if int(p.overflow) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
|
||||
|
||||
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
|
||||
for i := range buf {
|
||||
buf[i] = 0
|
||||
}
|
||||
tx.db.pagePool.Put(buf)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeMeta writes the meta to the disk.
|
||||
func (tx *Tx) writeMeta() error {
|
||||
// Create a temporary buffer for the meta page.
|
||||
buf := make([]byte, tx.db.pageSize)
|
||||
p := tx.db.pageInBuffer(buf, 0)
|
||||
tx.meta.write(p)
|
||||
|
||||
// Write the meta page to file.
|
||||
if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
|
||||
return err
|
||||
}
|
||||
if !tx.db.NoSync || IgnoreNoSync {
|
||||
if err := fdatasync(tx.db); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.Write++
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// page returns a reference to the page with a given id.
|
||||
// If page has been written to then a temporary buffered page is returned.
|
||||
func (tx *Tx) page(id pgid) *page {
|
||||
// Check the dirty pages first.
|
||||
if tx.pages != nil {
|
||||
if p, ok := tx.pages[id]; ok {
|
||||
return p
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise return directly from the mmap.
|
||||
return tx.db.page(id)
|
||||
}
|
||||
|
||||
// forEachPage iterates over every page within a given page and executes a function.
|
||||
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
|
||||
p := tx.page(pgid)
|
||||
|
||||
// Execute function.
|
||||
fn(p, depth)
|
||||
|
||||
// Recursively loop over children.
|
||||
if (p.flags & branchPageFlag) != 0 {
|
||||
for i := 0; i < int(p.count); i++ {
|
||||
elem := p.branchPageElement(uint16(i))
|
||||
tx.forEachPage(elem.pgid, depth+1, fn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Page returns page information for a given page number.
|
||||
// This is only safe for concurrent use when used by a writable transaction.
|
||||
func (tx *Tx) Page(id int) (*PageInfo, error) {
|
||||
if tx.db == nil {
|
||||
return nil, ErrTxClosed
|
||||
} else if pgid(id) >= tx.meta.pgid {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Build the page info.
|
||||
p := tx.db.page(pgid(id))
|
||||
info := &PageInfo{
|
||||
ID: id,
|
||||
Count: int(p.count),
|
||||
OverflowCount: int(p.overflow),
|
||||
}
|
||||
|
||||
// Determine the type (or if it's free).
|
||||
if tx.db.freelist.freed(pgid(id)) {
|
||||
info.Type = "free"
|
||||
} else {
|
||||
info.Type = p.typ()
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// TxStats represents statistics about the actions performed by the transaction.
|
||||
type TxStats struct {
|
||||
// Page statistics.
|
||||
PageCount int // number of page allocations
|
||||
PageAlloc int // total bytes allocated
|
||||
|
||||
// Cursor statistics.
|
||||
CursorCount int // number of cursors created
|
||||
|
||||
// Node statistics
|
||||
NodeCount int // number of node allocations
|
||||
NodeDeref int // number of node dereferences
|
||||
|
||||
// Rebalance statistics.
|
||||
Rebalance int // number of node rebalances
|
||||
RebalanceTime time.Duration // total time spent rebalancing
|
||||
|
||||
// Split/Spill statistics.
|
||||
Split int // number of nodes split
|
||||
Spill int // number of nodes spilled
|
||||
SpillTime time.Duration // total time spent spilling
|
||||
|
||||
// Write statistics.
|
||||
Write int // number of writes performed
|
||||
WriteTime time.Duration // total time spent writing to disk
|
||||
}
|
||||
|
||||
func (s *TxStats) add(other *TxStats) {
|
||||
s.PageCount += other.PageCount
|
||||
s.PageAlloc += other.PageAlloc
|
||||
s.CursorCount += other.CursorCount
|
||||
s.NodeCount += other.NodeCount
|
||||
s.NodeDeref += other.NodeDeref
|
||||
s.Rebalance += other.Rebalance
|
||||
s.RebalanceTime += other.RebalanceTime
|
||||
s.Split += other.Split
|
||||
s.Spill += other.Spill
|
||||
s.SpillTime += other.SpillTime
|
||||
s.Write += other.Write
|
||||
s.WriteTime += other.WriteTime
|
||||
}
|
||||
|
||||
// Sub calculates and returns the difference between two sets of transaction stats.
|
||||
// This is useful when obtaining stats at two different points and time and
|
||||
// you need the performance counters that occurred within that time span.
|
||||
func (s *TxStats) Sub(other *TxStats) TxStats {
|
||||
var diff TxStats
|
||||
diff.PageCount = s.PageCount - other.PageCount
|
||||
diff.PageAlloc = s.PageAlloc - other.PageAlloc
|
||||
diff.CursorCount = s.CursorCount - other.CursorCount
|
||||
diff.NodeCount = s.NodeCount - other.NodeCount
|
||||
diff.NodeDeref = s.NodeDeref - other.NodeDeref
|
||||
diff.Rebalance = s.Rebalance - other.Rebalance
|
||||
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
|
||||
diff.Split = s.Split - other.Split
|
||||
diff.Spill = s.Spill - other.Spill
|
||||
diff.SpillTime = s.SpillTime - other.SpillTime
|
||||
diff.Write = s.Write - other.Write
|
||||
diff.WriteTime = s.WriteTime - other.WriteTime
|
||||
return diff
|
||||
}
|
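Aside: this Tx API is the seam crockery leans on, via storm, for the single-file crockery.db store. A minimal sketch of driving it directly — the bucket and key names here just mirror the layout sketched in DESIGN.md, not code that exists yet:

```go
package main

import (
	"log"

	bolt "github.com/coreos/bbolt"
)

func main() {
	// One file, like crockery.db; bbolt handles locking and atomicity.
	db, err := bolt.Open("crockery.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Begin a writable transaction; Commit/Rollback are the methods above.
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback() // harmless after a successful Commit

	b, err := tx.CreateBucketIfNotExists([]byte("config"))
	if err != nil {
		log.Fatal(err)
	}
	if err := b.Put([]byte("domain"), []byte("example.com")); err != nil {
		log.Fatal(err)
	}

	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
```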
3
vendor/golang.org/x/sys/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
3
vendor/golang.org/x/sys/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
27
vendor/golang.org/x/sys/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22
vendor/golang.org/x/sys/PATENTS
generated
vendored
Normal file
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
2
vendor/golang.org/x/sys/unix/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
_obj/
unix.test
173
vendor/golang.org/x/sys/unix/README.md
generated
vendored
Normal file
@@ -0,0 +1,173 @@
# Building `sys/unix`

The sys/unix package provides access to the raw system call interface of the
underlying operating system. See: https://godoc.org/golang.org/x/sys/unix

Porting Go to a new architecture/OS combination or adding syscalls, types, or
constants to an existing architecture/OS pair requires some manual effort;
however, there are tools that automate much of the process.

## Build Systems

There are currently two ways we generate the necessary files. We are currently
migrating the build system to use containers so the builds are reproducible.
This is being done on an OS-by-OS basis. Please update this documentation as
components of the build system change.

### Old Build System (currently for `GOOS != "Linux" || GOARCH == "sparc64"`)

The old build system generates the Go files based on the C header files
present on your system. This means that files
for a given GOOS/GOARCH pair must be generated on a system with that OS and
architecture. This also means that the generated code can differ from system
to system, based on differences in the header files.

To avoid this, if you are using the old build system, only generate the Go
files on an installation with unmodified header files. It is also important to
keep track of which version of the OS the files were generated from (ex.
Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
and have each OS upgrade correspond to a single change.

To build the files for your current OS and architecture, make sure GOOS and
GOARCH are set correctly and run `mkall.sh`. This will generate the files for
your specific system. Running `mkall.sh -n` shows the commands that will be run.

Requirements: bash, perl, go

### New Build System (currently for `GOOS == "Linux" && GOARCH != "sparc64"`)

The new build system uses a Docker container to generate the go files directly
from source checkouts of the kernel and various system libraries. This means
that on any platform that supports Docker, all the files using the new build
system can be generated at once, and generated files will not change based on
what the person running the scripts has installed on their computer.

The OS specific files for the new build system are located in the `${GOOS}`
directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
the kernel or system library updates, modify the Dockerfile at
`${GOOS}/Dockerfile` to checkout the new release of the source.

To build all the files under the new build system, you must be on an amd64/Linux
system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
then generate all of the files for all of the GOOS/GOARCH pairs in the new build
system. Running `mkall.sh -n` shows the commands that will be run.

Requirements: bash, perl, go, docker

## Component files

This section describes the various files used in the code generation process.
It also contains instructions on how to modify these files to add a new
architecture/OS or to add additional syscalls, types, or constants. Note that
if you are using the new build system, the scripts cannot be called normally.
They must be called from within the docker container.

### asm files

The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
call dispatch. There are three entry points:
```
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
```
The first and second are the standard ones; they differ only in how many
arguments can be passed to the kernel. The third is for low-level use by the
ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
let it know that a system call is running.

When porting Go to a new architecture/OS, this file must be implemented for
each GOOS/GOARCH pair.

### mksysnum

Mksysnum is a script located at `${GOOS}/mksysnum.pl` (or `mksysnum_${GOOS}.pl`
for the old system). This script takes in a list of header files containing the
syscall number declarations and parses them to produce the corresponding list of
Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
constants.

Adding new syscall numbers is mostly done by running the build on a sufficiently
new installation of the target OS (or updating the source checkouts for the
new build system). However, depending on the OS, you may need to update the
parsing in mksysnum.

### mksyscall.pl

The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
hand-written Go files which implement system calls (for unix, the specific OS,
or the specific OS/Architecture pair respectively) that need special handling
and list `//sys` comments giving prototypes for ones that can be generated.

The mksyscall.pl script takes the `//sys` and `//sysnb` comments and converts
them into syscalls. This requires the name of the prototype in the comment to
match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
prototype can be exported (capitalized) or not.

Adding a new syscall often just requires adding a new `//sys` function prototype
with the desired arguments and a capitalized name so it is exported. However, if
you want the interface to the syscall to be different, often one will make an
unexported `//sys` prototype, and then write a custom wrapper in
`syscall_${GOOS}.go`.

### types files

For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
`types_${GOOS}.go` on the old system). This file includes standard C headers and
creates Go type aliases to the corresponding C types. The file is then fed
through godef to get the Go compatible definitions. Finally, the generated code
is fed through mkpost.go to format the code correctly and remove any hidden or
private identifiers. This cleaned-up code is written to
`ztypes_${GOOS}_${GOARCH}.go`.

The hardest part about preparing this file is figuring out which headers to
include and which symbols need to be `#define`d to get the actual data
structures that pass through to the kernel system calls. Some C libraries
present alternate versions for binary compatibility and translate them on the
way in and out of system calls, but there is almost always a `#define` that can
get the real ones.
See `types_darwin.go` and `linux/types.go` for examples.

To add a new type, add in the necessary include statement at the top of the
file (if it is not already there) and add in a type alias line. Note that if
your type is significantly different on different architectures, you may need
some `#if/#elif` macros in your include statements.

### mkerrors.sh

This script is used to generate the system's various constants. This doesn't
just include the error numbers and error strings, but also the signal numbers
and a wide variety of miscellaneous constants. The constants come from the list
of include files in the `includes_${uname}` variable. A regex then picks out
the desired `#define` statements, and generates the corresponding Go constants.
The error numbers and strings are generated from `#include <errno.h>`, and the
signal numbers and strings are generated from `#include <signal.h>`. All of
these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
`_errors.c`, which prints out all the constants.

To add a constant, add the header that includes it to the appropriate variable.
Then, edit the regex (if necessary) to match the desired constant. Avoid making
the regex too broad to avoid matching unintended constants.


## Generated files

### `zerrors_${GOOS}_${GOARCH}.go`

A file containing all of the system's generated error numbers, error strings,
signal numbers, and constants. Generated by `mkerrors.sh` (see above).

### `zsyscall_${GOOS}_${GOARCH}.go`

A file containing all the generated syscalls for a specific GOOS and GOARCH.
Generated by `mksyscall.pl` (see above).

### `zsysnum_${GOOS}_${GOARCH}.go`

A list of numeric constants for all the syscall numbers of the specific GOOS
and GOARCH. Generated by mksysnum (see above).

### `ztypes_${GOOS}_${GOARCH}.go`

A file containing Go types for passing into (or returning from) syscalls.
Generated by godefs and the types file (see above).
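Aside: to make the mksyscall.pl description concrete, here is what a `//sys` prototype looks like. Illustrative only — `Fchmod` is one of the real prototypes, but the authoritative list lives in the `syscall_${GOOS}.go` files:

```go
// mksyscall.pl pairs the name in this comment with a syscall number
// from zsysnum_${GOOS}_${GOARCH}.go and emits the wrapper into
// zsyscall_${GOOS}_${GOARCH}.go; a capitalized name makes it exported.
//sys	Fchmod(fd int, mode uint32) (err error)
```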
124
vendor/golang.org/x/sys/unix/affinity_linux.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// CPU affinity functions

package unix

import (
	"unsafe"
)

const cpuSetSize = _CPU_SETSIZE / _NCPUBITS

// CPUSet represents a CPU affinity mask.
type CPUSet [cpuSetSize]cpuMask

func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
	_, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
	if e != 0 {
		return errnoErr(e)
	}
	return nil
}

// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
// If pid is 0 the calling thread is used.
func SchedGetaffinity(pid int, set *CPUSet) error {
	return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
}

// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
// If pid is 0 the calling thread is used.
func SchedSetaffinity(pid int, set *CPUSet) error {
	return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
}

// Zero clears the set s, so that it contains no CPUs.
func (s *CPUSet) Zero() {
	for i := range s {
		s[i] = 0
	}
}

func cpuBitsIndex(cpu int) int {
	return cpu / _NCPUBITS
}

func cpuBitsMask(cpu int) cpuMask {
	return cpuMask(1 << (uint(cpu) % _NCPUBITS))
}

// Set adds cpu to the set s.
func (s *CPUSet) Set(cpu int) {
	i := cpuBitsIndex(cpu)
	if i < len(s) {
		s[i] |= cpuBitsMask(cpu)
	}
}

// Clear removes cpu from the set s.
func (s *CPUSet) Clear(cpu int) {
	i := cpuBitsIndex(cpu)
	if i < len(s) {
		s[i] &^= cpuBitsMask(cpu)
	}
}

// IsSet reports whether cpu is in the set s.
func (s *CPUSet) IsSet(cpu int) bool {
	i := cpuBitsIndex(cpu)
	if i < len(s) {
		return s[i]&cpuBitsMask(cpu) != 0
	}
	return false
}

// Count returns the number of CPUs in the set s.
func (s *CPUSet) Count() int {
	c := 0
	for _, b := range s {
		c += onesCount64(uint64(b))
	}
	return c
}

// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64.
// Once this package can require Go 1.9, we can delete this
// and update the caller to use bits.OnesCount64.
func onesCount64(x uint64) int {
	const m0 = 0x5555555555555555 // 01010101 ...
	const m1 = 0x3333333333333333 // 00110011 ...
	const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ...
	const m3 = 0x00ff00ff00ff00ff // etc.
	const m4 = 0x0000ffff0000ffff

	// Implementation: Parallel summing of adjacent bits.
	// See "Hacker's Delight", Chap. 5: Counting Bits.
	// The following pattern shows the general approach:
	//
	//   x = x>>1&(m0&m) + x&(m0&m)
	//   x = x>>2&(m1&m) + x&(m1&m)
	//   x = x>>4&(m2&m) + x&(m2&m)
	//   x = x>>8&(m3&m) + x&(m3&m)
	//   x = x>>16&(m4&m) + x&(m4&m)
	//   x = x>>32&(m5&m) + x&(m5&m)
	//   return int(x)
	//
	// Masking (& operations) can be left away when there's no
	// danger that a field's sum will carry over into the next
	// field: Since the result cannot be > 64, 8 bits is enough
	// and we can ignore the masks for the shifts by 8 and up.
	// Per "Hacker's Delight", the first line can be simplified
	// more, but it saves at best one instruction, so we leave
	// it alone for clarity.
	const m = 1<<64 - 1
	x = x>>1&(m0&m) + x&(m0&m)
	x = x>>2&(m1&m) + x&(m1&m)
	x = (x>>4 + x) & (m2 & m)
	x += x >> 8
	x += x >> 16
	x += x >> 32
	return int(x) & (1<<7 - 1)
}
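Aside: crockery doesn't call any of this itself — x/sys/unix just came in as a transitive dependency — but the affinity API above is self-contained enough to show in isolation. A minimal Linux-only sketch:

```go
// +build linux

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var set unix.CPUSet
	// Pid 0 means the calling thread, per the doc comments above.
	if err := unix.SchedGetaffinity(0, &set); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("allowed CPUs: %d (CPU 0 set: %v)\n", set.Count(), set.IsSet(0))
}
```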
29
vendor/golang.org/x/sys/unix/asm_darwin_386.s
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for 386, Darwin
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	JMP syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-52
	JMP syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	JMP syscall·RawSyscall6(SB)
29
vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for AMD64, Darwin
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	JMP syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-104
	JMP syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	JMP syscall·RawSyscall6(SB)
30
vendor/golang.org/x/sys/unix/asm_darwin_arm.s
generated
vendored
Normal file
@@ -0,0 +1,30 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo
// +build arm,darwin

#include "textflag.h"

//
// System call support for ARM, Darwin
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	B syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	B syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-52
	B syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	B syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	B syscall·RawSyscall6(SB)
30
vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
generated
vendored
Normal file
@@ -0,0 +1,30 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo
// +build arm64,darwin

#include "textflag.h"

//
// System call support for ARM64, Darwin
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	B syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	B syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-104
	B syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	B syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	B syscall·RawSyscall6(SB)
29
vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for AMD64, DragonFly
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-64
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-88
	JMP syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-112
	JMP syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-64
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-88
	JMP syscall·RawSyscall6(SB)
29
vendor/golang.org/x/sys/unix/asm_freebsd_386.s
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for 386, FreeBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	JMP syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-52
	JMP syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	JMP syscall·RawSyscall6(SB)
29
vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for AMD64, FreeBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	JMP syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-104
	JMP syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	JMP syscall·RawSyscall6(SB)
29
vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for ARM, FreeBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	B syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	B syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-52
	B syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	B syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	B syscall·RawSyscall6(SB)
65
vendor/golang.org/x/sys/unix/asm_linux_386.s
generated
vendored
Normal file
@@ -0,0 +1,65 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System calls for 386, Linux
//

// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
// instead of the glibc-specific "CALL 0x10(GS)".
#define INVOKE_SYSCALL INT $0x80

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	JMP syscall·Syscall6(SB)

TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
	CALL runtime·entersyscall(SB)
	MOVL trap+0(FP), AX // syscall entry
	MOVL a1+4(FP), BX
	MOVL a2+8(FP), CX
	MOVL a3+12(FP), DX
	MOVL $0, SI
	MOVL $0, DI
	INVOKE_SYSCALL
	MOVL AX, r1+16(FP)
	MOVL DX, r2+20(FP)
	CALL runtime·exitsyscall(SB)
	RET

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	JMP syscall·RawSyscall6(SB)

TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
	MOVL trap+0(FP), AX // syscall entry
	MOVL a1+4(FP), BX
	MOVL a2+8(FP), CX
	MOVL a3+12(FP), DX
	MOVL $0, SI
	MOVL $0, DI
	INVOKE_SYSCALL
	MOVL AX, r1+16(FP)
	MOVL DX, r2+20(FP)
	RET

TEXT ·socketcall(SB),NOSPLIT,$0-36
	JMP syscall·socketcall(SB)

TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
	JMP syscall·rawsocketcall(SB)

TEXT ·seek(SB),NOSPLIT,$0-28
	JMP syscall·seek(SB)
57
vendor/golang.org/x/sys/unix/asm_linux_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,57 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System calls for AMD64, Linux
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	JMP syscall·Syscall6(SB)

TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
	CALL runtime·entersyscall(SB)
	MOVQ a1+8(FP), DI
	MOVQ a2+16(FP), SI
	MOVQ a3+24(FP), DX
	MOVQ $0, R10
	MOVQ $0, R8
	MOVQ $0, R9
	MOVQ trap+0(FP), AX // syscall entry
	SYSCALL
	MOVQ AX, r1+32(FP)
	MOVQ DX, r2+40(FP)
	CALL runtime·exitsyscall(SB)
	RET

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	JMP syscall·RawSyscall6(SB)

TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
	MOVQ a1+8(FP), DI
	MOVQ a2+16(FP), SI
	MOVQ a3+24(FP), DX
	MOVQ $0, R10
	MOVQ $0, R8
	MOVQ $0, R9
	MOVQ trap+0(FP), AX // syscall entry
	SYSCALL
	MOVQ AX, r1+32(FP)
	MOVQ DX, r2+40(FP)
	RET

TEXT ·gettimeofday(SB),NOSPLIT,$0-16
	JMP syscall·gettimeofday(SB)
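Aside: these stubs are what the generated wrappers ultimately dispatch through. A hedged illustration of invoking one by hand on linux/amd64 — SYS_GETPID is a real constant from the generated zsysnum files, though in practice you'd use the typed wrappers instead:

```go
// +build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Dispatches via the Syscall stub above (SYSCALL instruction on amd64).
	r1, _, errno := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
	if errno != 0 {
		fmt.Println("getpid failed:", errno)
		return
	}
	fmt.Println("pid:", r1)
}
```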
56
vendor/golang.org/x/sys/unix/asm_linux_arm.s
generated
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System calls for arm, Linux
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	B syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	B syscall·Syscall6(SB)

TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
	BL runtime·entersyscall(SB)
	MOVW trap+0(FP), R7
	MOVW a1+4(FP), R0
	MOVW a2+8(FP), R1
	MOVW a3+12(FP), R2
	MOVW $0, R3
	MOVW $0, R4
	MOVW $0, R5
	SWI $0
	MOVW R0, r1+16(FP)
	MOVW $0, R0
	MOVW R0, r2+20(FP)
	BL runtime·exitsyscall(SB)
	RET

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	B syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	B syscall·RawSyscall6(SB)

TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
	MOVW trap+0(FP), R7 // syscall entry
	MOVW a1+4(FP), R0
	MOVW a2+8(FP), R1
	MOVW a3+12(FP), R2
	SWI $0
	MOVW R0, r1+16(FP)
	MOVW $0, R0
	MOVW R0, r2+20(FP)
	RET

TEXT ·seek(SB),NOSPLIT,$0-28
	B syscall·seek(SB)
52
vendor/golang.org/x/sys/unix/asm_linux_arm64.s
generated
vendored
Normal file
@@ -0,0 +1,52 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux
// +build arm64
// +build !gccgo

#include "textflag.h"

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	B syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	B syscall·Syscall6(SB)

TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
	BL runtime·entersyscall(SB)
	MOVD a1+8(FP), R0
	MOVD a2+16(FP), R1
	MOVD a3+24(FP), R2
	MOVD $0, R3
	MOVD $0, R4
	MOVD $0, R5
	MOVD trap+0(FP), R8 // syscall entry
	SVC
	MOVD R0, r1+32(FP) // r1
	MOVD R1, r2+40(FP) // r2
	BL runtime·exitsyscall(SB)
	RET

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	B syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	B syscall·RawSyscall6(SB)

TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
	MOVD a1+8(FP), R0
	MOVD a2+16(FP), R1
	MOVD a3+24(FP), R2
	MOVD $0, R3
	MOVD $0, R4
	MOVD $0, R5
	MOVD trap+0(FP), R8 // syscall entry
	SVC
	MOVD R0, r1+32(FP)
	MOVD R1, r2+40(FP)
	RET
56
vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
generated
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux
// +build mips64 mips64le
// +build !gccgo

#include "textflag.h"

//
// System calls for mips64, Linux
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	JMP syscall·Syscall6(SB)

TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
	JAL runtime·entersyscall(SB)
	MOVV a1+8(FP), R4
	MOVV a2+16(FP), R5
	MOVV a3+24(FP), R6
	MOVV R0, R7
	MOVV R0, R8
	MOVV R0, R9
	MOVV trap+0(FP), R2 // syscall entry
	SYSCALL
	MOVV R2, r1+32(FP)
	MOVV R3, r2+40(FP)
	JAL runtime·exitsyscall(SB)
	RET

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	JMP syscall·RawSyscall6(SB)

TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
	MOVV a1+8(FP), R4
	MOVV a2+16(FP), R5
	MOVV a3+24(FP), R6
	MOVV R0, R7
	MOVV R0, R8
	MOVV R0, R9
	MOVV trap+0(FP), R2 // syscall entry
	SYSCALL
	MOVV R2, r1+32(FP)
	MOVV R3, r2+40(FP)
	RET
54
vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
generated
vendored
Normal file
@@ -0,0 +1,54 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux
// +build mips mipsle
// +build !gccgo

#include "textflag.h"

//
// System calls for mips, Linux
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	JMP syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-52
	JMP syscall·Syscall9(SB)

TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
	JAL runtime·entersyscall(SB)
	MOVW a1+4(FP), R4
	MOVW a2+8(FP), R5
	MOVW a3+12(FP), R6
	MOVW R0, R7
	MOVW trap+0(FP), R2 // syscall entry
	SYSCALL
	MOVW R2, r1+16(FP) // r1
	MOVW R3, r2+20(FP) // r2
	JAL runtime·exitsyscall(SB)
	RET

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	JMP syscall·RawSyscall6(SB)

TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
	MOVW a1+4(FP), R4
	MOVW a2+8(FP), R5
	MOVW a3+12(FP), R6
	MOVW trap+0(FP), R2 // syscall entry
	SYSCALL
	MOVW R2, r1+16(FP)
	MOVW R3, r2+20(FP)
	RET
56
vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
generated
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux
// +build ppc64 ppc64le
// +build !gccgo

#include "textflag.h"

//
// System calls for ppc64, Linux
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	BR syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	BR syscall·Syscall6(SB)

TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
	BL runtime·entersyscall(SB)
	MOVD a1+8(FP), R3
	MOVD a2+16(FP), R4
	MOVD a3+24(FP), R5
	MOVD R0, R6
	MOVD R0, R7
	MOVD R0, R8
	MOVD trap+0(FP), R9 // syscall entry
	SYSCALL R9
	MOVD R3, r1+32(FP)
	MOVD R4, r2+40(FP)
	BL runtime·exitsyscall(SB)
	RET

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	BR syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	BR syscall·RawSyscall6(SB)

TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
	MOVD a1+8(FP), R3
	MOVD a2+16(FP), R4
	MOVD a3+24(FP), R5
	MOVD R0, R6
	MOVD R0, R7
	MOVD R0, R8
	MOVD trap+0(FP), R9 // syscall entry
	SYSCALL R9
	MOVD R3, r1+32(FP)
	MOVD R4, r2+40(FP)
	RET
56
vendor/golang.org/x/sys/unix/asm_linux_s390x.s
generated
vendored
Normal file
56
vendor/golang.org/x/sys/unix/asm_linux_s390x.s
generated
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build s390x
// +build linux
// +build !gccgo

#include "textflag.h"

//
// System calls for s390x, Linux
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	BR syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	BR syscall·Syscall6(SB)

TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
	BL runtime·entersyscall(SB)
	MOVD a1+8(FP), R2
	MOVD a2+16(FP), R3
	MOVD a3+24(FP), R4
	MOVD $0, R5
	MOVD $0, R6
	MOVD $0, R7
	MOVD trap+0(FP), R1 // syscall entry
	SYSCALL
	MOVD R2, r1+32(FP)
	MOVD R3, r2+40(FP)
	BL runtime·exitsyscall(SB)
	RET

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	BR syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	BR syscall·RawSyscall6(SB)

TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
	MOVD a1+8(FP), R2
	MOVD a2+16(FP), R3
	MOVD a3+24(FP), R4
	MOVD $0, R5
	MOVD $0, R6
	MOVD $0, R7
	MOVD trap+0(FP), R1 // syscall entry
	SYSCALL
	MOVD R2, r1+32(FP)
	MOVD R3, r2+40(FP)
	RET
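For context: none of these vendored stubs are called directly; they only back the exported Go wrappers in the unix package. A minimal sketch of exercising one of them on Linux (assuming a Linux build, where the generated unix.SYS_GETPID constant is available):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// unix.Syscall jumps (via the assembly above) into package syscall's
	// implementation; getpid(2) cannot fail, so errno is ignored here.
	pid, _, _ := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
	fmt.Println("pid:", pid)
}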
29 vendor/golang.org/x/sys/unix/asm_netbsd_386.s generated vendored Normal file
@@ -0,0 +1,29 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for 386, NetBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	JMP syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-52
	JMP syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	JMP syscall·RawSyscall6(SB)
29 vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s generated vendored Normal file
@@ -0,0 +1,29 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for AMD64, NetBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	JMP syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-104
	JMP syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	JMP syscall·RawSyscall6(SB)
29 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s generated vendored Normal file
@@ -0,0 +1,29 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for ARM, NetBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	B syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	B syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-52
	B syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	B syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	B syscall·RawSyscall6(SB)
29 vendor/golang.org/x/sys/unix/asm_openbsd_386.s generated vendored Normal file
@@ -0,0 +1,29 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for 386, OpenBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	JMP syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-52
	JMP syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	JMP syscall·RawSyscall6(SB)
29 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s generated vendored Normal file
@@ -0,0 +1,29 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for AMD64, OpenBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	JMP syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	JMP syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-104
	JMP syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	JMP syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	JMP syscall·RawSyscall6(SB)
29 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s generated vendored Normal file
@@ -0,0 +1,29 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for ARM, OpenBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	B syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	B syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-52
	B syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	B syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	B syscall·RawSyscall6(SB)
17 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s generated vendored Normal file
@@ -0,0 +1,17 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
//

TEXT ·sysvicall6(SB),NOSPLIT,$0-88
	JMP syscall·sysvicall6(SB)

TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88
	JMP syscall·rawSysvicall6(SB)
35 vendor/golang.org/x/sys/unix/bluetooth_linux.go generated vendored Normal file
@@ -0,0 +1,35 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Bluetooth sockets and messages

package unix

// Bluetooth Protocols
const (
	BTPROTO_L2CAP  = 0
	BTPROTO_HCI    = 1
	BTPROTO_SCO    = 2
	BTPROTO_RFCOMM = 3
	BTPROTO_BNEP   = 4
	BTPROTO_CMTP   = 5
	BTPROTO_HIDP   = 6
	BTPROTO_AVDTP  = 7
)

const (
	HCI_CHANNEL_RAW     = 0
	HCI_CHANNEL_USER    = 1
	HCI_CHANNEL_MONITOR = 2
	HCI_CHANNEL_CONTROL = 3
)

// Socketoption Level
const (
	SOL_BLUETOOTH = 0x112
	SOL_HCI       = 0x0
	SOL_L2CAP     = 0x6
	SOL_RFCOMM    = 0x12
	SOL_SCO       = 0x11
)
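These constants pair with the generic socket calls in the same package. A hedged sketch of binding a raw HCI socket on Linux using them (assumes the process has the necessary privileges, e.g. CAP_NET_RAW, and that unix.SockaddrHCI with Dev and Channel fields is available as in this vendored revision):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Raw HCI socket bound to the first Bluetooth controller (hci0).
	fd, err := unix.Socket(unix.AF_BLUETOOTH, unix.SOCK_RAW, unix.BTPROTO_HCI)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	sa := &unix.SockaddrHCI{Dev: 0, Channel: unix.HCI_CHANNEL_RAW}
	if err := unix.Bind(fd, sa); err != nil {
		log.Fatal(err)
	}
	log.Println("bound raw HCI socket on hci0")
}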
195 vendor/golang.org/x/sys/unix/cap_freebsd.go generated vendored Normal file
@@ -0,0 +1,195 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build freebsd

package unix

import (
	errorspkg "errors"
	"fmt"
)

// Go implementation of C mostly found in /usr/src/sys/kern/subr_capability.c

const (
	// This is the version of CapRights this package understands. See C implementation for parallels.
	capRightsGoVersion = CAP_RIGHTS_VERSION_00
	capArSizeMin       = CAP_RIGHTS_VERSION_00 + 2
	capArSizeMax       = capRightsGoVersion + 2
)

var (
	bit2idx = []int{
		-1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
		4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	}
)

func capidxbit(right uint64) int {
	return int((right >> 57) & 0x1f)
}

func rightToIndex(right uint64) (int, error) {
	idx := capidxbit(right)
	if idx < 0 || idx >= len(bit2idx) {
		return -2, fmt.Errorf("index for right 0x%x out of range", right)
	}
	return bit2idx[idx], nil
}

func caprver(right uint64) int {
	return int(right >> 62)
}

func capver(rights *CapRights) int {
	return caprver(rights.Rights[0])
}

func caparsize(rights *CapRights) int {
	return capver(rights) + 2
}

// CapRightsSet sets the permissions in setrights in rights.
func CapRightsSet(rights *CapRights, setrights []uint64) error {
	// This is essentially a copy of cap_rights_vset()
	if capver(rights) != CAP_RIGHTS_VERSION_00 {
		return fmt.Errorf("bad rights version %d", capver(rights))
	}

	n := caparsize(rights)
	if n < capArSizeMin || n > capArSizeMax {
		return errorspkg.New("bad rights size")
	}

	for _, right := range setrights {
		if caprver(right) != CAP_RIGHTS_VERSION_00 {
			return errorspkg.New("bad right version")
		}
		i, err := rightToIndex(right)
		if err != nil {
			return err
		}
		if i >= n {
			return errorspkg.New("index overflow")
		}
		if capidxbit(rights.Rights[i]) != capidxbit(right) {
			return errorspkg.New("index mismatch")
		}
		rights.Rights[i] |= right
		if capidxbit(rights.Rights[i]) != capidxbit(right) {
			return errorspkg.New("index mismatch (after assign)")
		}
	}

	return nil
}

// CapRightsClear clears the permissions in clearrights from rights.
func CapRightsClear(rights *CapRights, clearrights []uint64) error {
	// This is essentially a copy of cap_rights_vclear()
	if capver(rights) != CAP_RIGHTS_VERSION_00 {
		return fmt.Errorf("bad rights version %d", capver(rights))
	}

	n := caparsize(rights)
	if n < capArSizeMin || n > capArSizeMax {
		return errorspkg.New("bad rights size")
	}

	for _, right := range clearrights {
		if caprver(right) != CAP_RIGHTS_VERSION_00 {
			return errorspkg.New("bad right version")
		}
		i, err := rightToIndex(right)
		if err != nil {
			return err
		}
		if i >= n {
			return errorspkg.New("index overflow")
		}
		if capidxbit(rights.Rights[i]) != capidxbit(right) {
			return errorspkg.New("index mismatch")
		}
		rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF)
		if capidxbit(rights.Rights[i]) != capidxbit(right) {
			return errorspkg.New("index mismatch (after assign)")
		}
	}

	return nil
}

// CapRightsIsSet checks whether all the permissions in setrights are present in rights.
func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) {
	// This is essentially a copy of cap_rights_is_vset()
	if capver(rights) != CAP_RIGHTS_VERSION_00 {
		return false, fmt.Errorf("bad rights version %d", capver(rights))
	}

	n := caparsize(rights)
	if n < capArSizeMin || n > capArSizeMax {
		return false, errorspkg.New("bad rights size")
	}

	for _, right := range setrights {
		if caprver(right) != CAP_RIGHTS_VERSION_00 {
			return false, errorspkg.New("bad right version")
		}
		i, err := rightToIndex(right)
		if err != nil {
			return false, err
		}
		if i >= n {
			return false, errorspkg.New("index overflow")
		}
		if capidxbit(rights.Rights[i]) != capidxbit(right) {
			return false, errorspkg.New("index mismatch")
		}
		if (rights.Rights[i] & right) != right {
			return false, nil
		}
	}

	return true, nil
}

func capright(idx uint64, bit uint64) uint64 {
	return ((1 << (57 + idx)) | bit)
}

// CapRightsInit returns a pointer to an initialised CapRights structure filled with rights.
// See man cap_rights_init(3) and rights(4).
func CapRightsInit(rights []uint64) (*CapRights, error) {
	var r CapRights
	r.Rights[0] = (capRightsGoVersion << 62) | capright(0, 0)
	r.Rights[1] = capright(1, 0)

	err := CapRightsSet(&r, rights)
	if err != nil {
		return nil, err
	}
	return &r, nil
}

// CapRightsLimit reduces the operations permitted on fd to at most those contained in rights.
// The capability rights on fd can never be increased by CapRightsLimit.
// See man cap_rights_limit(2) and rights(4).
func CapRightsLimit(fd uintptr, rights *CapRights) error {
	return capRightsLimit(int(fd), rights)
}

// CapRightsGet returns a CapRights structure containing the operations permitted on fd.
// See man cap_rights_get(3) and rights(4).
func CapRightsGet(fd uintptr) (*CapRights, error) {
	r, err := CapRightsInit(nil)
	if err != nil {
		return nil, err
	}
	err = capRightsGet(capRightsGoVersion, int(fd), r)
	if err != nil {
		return nil, err
	}
	return r, nil
}
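The exported functions above compose into the usual Capsicum pattern: build a rights set, then clamp a descriptor. A sketch under the assumption of a FreeBSD build, where CAP_READ, CAP_SEEK, and CAP_WRITE are generated constants in this package:

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/passwd")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Allow only read(2) and lseek(2) on this descriptor from now on;
	// CapRightsLimit can narrow rights but never widen them.
	rights, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_SEEK})
	if err != nil {
		log.Fatal(err)
	}
	if err := unix.CapRightsLimit(f.Fd(), rights); err != nil {
		log.Fatal(err)
	}

	// Verify against the in-memory rights set: CAP_WRITE was never added.
	ok, err := unix.CapRightsIsSet(rights, []uint64{unix.CAP_WRITE})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("write permitted:", ok) // false
}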
13 vendor/golang.org/x/sys/unix/constants.go generated vendored Normal file
@@ -0,0 +1,13 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package unix

const (
	R_OK = 0x4
	W_OK = 0x2
	X_OK = 0x1
)
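These are the classic access(2) mode bits; a quick sketch of how they are typically consumed via unix.Access (mode 0 is the plain existence check, F_OK in C):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// access(2): does the calling process have read and write permission?
	err := unix.Access("/tmp", unix.R_OK|unix.W_OK)
	fmt.Println("/tmp read/write:", err == nil)

	// Mode 0 only tests that the path exists.
	err = unix.Access("/nonexistent", 0)
	fmt.Println("/nonexistent exists:", err == nil)
}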
24 vendor/golang.org/x/sys/unix/dev_darwin.go generated vendored Normal file
@@ -0,0 +1,24 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Functions to access/create device major and minor numbers matching the
// encoding used in Darwin's sys/types.h header.

package unix

// Major returns the major component of a Darwin device number.
func Major(dev uint64) uint32 {
	return uint32((dev >> 24) & 0xff)
}

// Minor returns the minor component of a Darwin device number.
func Minor(dev uint64) uint32 {
	return uint32(dev & 0xffffff)
}

// Mkdev returns a Darwin device number generated from the given major and minor
// components.
func Mkdev(major, minor uint32) uint64 {
	return (uint64(major) << 24) | uint64(minor)
}
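The Darwin encoding packs the major number into bits 24–31 and the minor into the low 24 bits, so Mkdev, Major, and Minor round-trip exactly. A small sketch (the major/minor values for /dev/null are conventional on macOS, not guaranteed):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Round-trip a device number built from its components.
	dev := unix.Mkdev(3, 2)
	fmt.Println(unix.Major(dev), unix.Minor(dev)) // 3 2

	// Or decode the device number of a real node via stat(2).
	var st unix.Stat_t
	if err := unix.Stat("/dev/null", &st); err == nil {
		fmt.Println(unix.Major(uint64(st.Rdev)), unix.Minor(uint64(st.Rdev)))
	}
}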
Some files were not shown because too many files have changed in this diff.