Replace godep with dep

This commit is contained in:
Manuel de Brito Fontes 2017-10-06 17:26:14 -03:00
parent 1e7489927c
commit bf5616c65b
14883 changed files with 3937406 additions and 361781 deletions

11
vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md generated vendored Normal file
View file

@ -0,0 +1,11 @@
Before reporting an issue, please ensure you are using the latest release of fsnotify.
### Which operating system (GOOS) and version are you using?
Linux: lsb_release -a
macOS: sw_vers
Windows: systeminfo | findstr /B /C:OS
### Please describe the issue that occurred.
### Are you able to reproduce the issue? Please provide steps to reproduce and a code sample if possible.

View file

@ -0,0 +1,8 @@
#### What does this pull request do?
#### Where should the reviewer start?
#### How should this be manually tested?

42
vendor/gopkg.in/fsnotify.v1/example_test.go generated vendored Normal file
View file

@ -0,0 +1,42 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
package fsnotify_test
import (
"log"
"github.com/fsnotify/fsnotify"
)
// ExampleNewWatcher demonstrates the basic fsnotify usage pattern:
// create a watcher, drain its Events and Errors channels on a separate
// goroutine, then register a path to watch.
func ExampleNewWatcher() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	done := make(chan bool)
	go func() {
		for {
			select {
			case event := <-watcher.Events:
				log.Println("event:", event)
				// Op is a bitmask; mask out the Write bit to detect
				// file modifications specifically.
				if event.Op&fsnotify.Write == fsnotify.Write {
					log.Println("modified file:", event.Name)
				}
			case err := <-watcher.Errors:
				log.Println("error:", err)
			}
		}
	}()

	err = watcher.Add("/tmp/foo")
	if err != nil {
		log.Fatal(err)
	}
	// done is never written to: the example intentionally blocks forever
	// (it has no "// Output:" comment, so `go test` does not execute it).
	<-done
}

40
vendor/gopkg.in/fsnotify.v1/fsnotify_test.go generated vendored Normal file
View file

@ -0,0 +1,40 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
package fsnotify
import "testing"
// TestEventStringWithValue checks that Event.String renders the file name
// plus the pipe-separated textual form of each Op combination.
func TestEventStringWithValue(t *testing.T) {
	cases := []struct {
		op   Op
		want string
	}{
		{Chmod | Create, `"/usr/someFile": CREATE|CHMOD`},
		{Rename, `"/usr/someFile": RENAME`},
		{Remove, `"/usr/someFile": REMOVE`},
		{Write | Chmod, `"/usr/someFile": WRITE|CHMOD`},
	}
	for _, c := range cases {
		ev := Event{Name: "/usr/someFile", Op: c.op}
		if got := ev.String(); got != c.want {
			t.Fatalf("Expected %s, got: %v", c.want, got)
		}
	}
}
// TestEventOpStringWithValue checks the textual form of a combined Op mask.
func TestEventOpStringWithValue(t *testing.T) {
	const want = "WRITE|CHMOD"
	ev := Event{Name: "someFile", Op: Write | Chmod}
	if got := ev.Op.String(); got != want {
		t.Fatalf("Expected %s, got: %v", want, got)
	}
}
// TestEventOpStringWithNoValue checks that a zero Op renders as the
// empty string.
func TestEventOpStringWithNoValue(t *testing.T) {
	const want = ""
	ev := Event{Name: "testFile", Op: 0}
	if got := ev.Op.String(); got != want {
		t.Fatalf("Expected %s, got: %v", want, got)
	}
}

229
vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go generated vendored Normal file
View file

@ -0,0 +1,229 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"testing"
"time"
"golang.org/x/sys/unix"
)
// testFd is a pipe used as a readable file descriptor for poller tests:
// index 0 is the read end, index 1 the write end.
type testFd [2]int

// makeTestFd creates the pipe, failing the test on error.
func makeTestFd(t *testing.T) testFd {
	var tfd testFd
	errno := unix.Pipe(tfd[:])
	if errno != nil {
		t.Fatalf("Failed to create pipe: %v", errno)
	}
	return tfd
}

// fd returns the read end — the descriptor handed to the poller.
func (tfd testFd) fd() int {
	return tfd[0]
}

// closeWrite closes the write end, which makes the read end report EOF.
func (tfd testFd) closeWrite(t *testing.T) {
	errno := unix.Close(tfd[1])
	if errno != nil {
		t.Fatalf("Failed to close write end of pipe: %v", errno)
	}
}

// put writes 10 bytes into the pipe, making the read end readable.
func (tfd testFd) put(t *testing.T) {
	buf := make([]byte, 10)
	_, errno := unix.Write(tfd[1], buf)
	if errno != nil {
		t.Fatalf("Failed to write to pipe: %v", errno)
	}
}

// get reads up to 10 bytes, draining what put wrote.
func (tfd testFd) get(t *testing.T) {
	buf := make([]byte, 10)
	_, errno := unix.Read(tfd[0], buf)
	if errno != nil {
		t.Fatalf("Failed to read from pipe: %v", errno)
	}
}

// close closes both ends; errors are deliberately ignored (cleanup only).
func (tfd testFd) close() {
	unix.Close(tfd[1])
	unix.Close(tfd[0])
}
// makePoller builds a pipe and an fdPoller watching its read end,
// failing the test if either step errors.
func makePoller(t *testing.T) (testFd, *fdPoller) {
	tfd := makeTestFd(t)
	poller, err := newFdPoller(tfd.fd())
	if err != nil {
		t.Fatalf("Failed to create poller: %v", err)
	}
	return tfd, poller
}
// TestPollerWithBadFd checks that newFdPoller rejects an invalid
// descriptor with EBADF.
func TestPollerWithBadFd(t *testing.T) {
	if _, err := newFdPoller(-1); err != unix.EBADF {
		t.Fatalf("Expected EBADF, got: %v", err)
	}
}
// TestPollerWithData verifies wait() returns true (readable) once data
// has been written to the watched fd.
func TestPollerWithData(t *testing.T) {
	tfd, poller := makePoller(t)
	defer tfd.close()
	defer poller.close()

	tfd.put(t)
	ok, err := poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if !ok {
		t.Fatalf("expected poller to return true")
	}
	// Drain so the pipe is empty for cleanup.
	tfd.get(t)
}
// TestPollerWithWakeup verifies that an explicit wake() unblocks wait()
// and makes it return false (no data on the watched fd).
func TestPollerWithWakeup(t *testing.T) {
	tfd, poller := makePoller(t)
	defer tfd.close()
	defer poller.close()

	err := poller.wake()
	if err != nil {
		t.Fatalf("wake failed: %v", err)
	}
	ok, err := poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if ok {
		t.Fatalf("expected poller to return false")
	}
}
// TestPollerWithClose verifies that closing the write end of the pipe
// makes wait() report the fd as ready (EOF is a readable condition).
func TestPollerWithClose(t *testing.T) {
	tfd, poller := makePoller(t)
	defer tfd.close()
	defer poller.close()

	tfd.closeWrite(t)
	ok, err := poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if !ok {
		t.Fatalf("expected poller to return true")
	}
}
// TestPollerWithWakeupAndData checks the interaction between pending data
// and wakeups: data keeps wait() returning true across calls, while a
// wakeup is consumed by a single wait() and returns false once the data
// is drained.
func TestPollerWithWakeupAndData(t *testing.T) {
	tfd, poller := makePoller(t)
	defer tfd.close()
	defer poller.close()

	tfd.put(t)
	err := poller.wake()
	if err != nil {
		t.Fatalf("wake failed: %v", err)
	}
	// both data and wakeup
	ok, err := poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if !ok {
		t.Fatalf("expected poller to return true")
	}
	// data is still in the buffer, wakeup is cleared
	ok, err = poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if !ok {
		t.Fatalf("expected poller to return true")
	}
	tfd.get(t)
	// data is gone, only wakeup now
	err = poller.wake()
	if err != nil {
		t.Fatalf("wake failed: %v", err)
	}
	ok, err = poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if ok {
		t.Fatalf("expected poller to return false")
	}
}
// TestPollerConcurrent exercises the poller from two goroutines: a
// background goroutine repeatedly calls wait() and publishes each result
// on oks, while the test goroutine triggers data, a wakeup, and a close,
// checking in each round that wait() actually blocked first.
func TestPollerConcurrent(t *testing.T) {
	tfd, poller := makePoller(t)
	defer tfd.close()
	defer poller.close()

	oks := make(chan bool)  // one wait() result per round
	live := make(chan bool) // false (or close at test end) stops the waiter
	defer close(live)
	go func() {
		defer close(oks)
		for {
			ok, err := poller.wait()
			if err != nil {
				// t.Fatalf must not be called from a non-test goroutine:
				// FailNow/Fatalf call runtime.Goexit and only stop the
				// calling goroutine. Record the failure and exit instead.
				t.Errorf("poller failed: %v", err)
				return
			}
			oks <- ok
			if !<-live {
				return
			}
		}
	}()

	// Try a write: wait() must not have returned before data arrived.
	select {
	case <-time.After(50 * time.Millisecond):
	case <-oks:
		t.Fatalf("poller did not wait")
	}
	tfd.put(t)
	if !<-oks {
		t.Fatalf("expected true")
	}
	tfd.get(t)
	live <- true

	// Try a wakeup: wait() returns false for an explicit wake().
	select {
	case <-time.After(50 * time.Millisecond):
	case <-oks:
		t.Fatalf("poller did not wait")
	}
	err := poller.wake()
	if err != nil {
		t.Fatalf("wake failed: %v", err)
	}
	if <-oks {
		t.Fatalf("expected false")
	}
	live <- true

	// Try a close: closing the write end makes the fd readable (EOF).
	select {
	case <-time.After(50 * time.Millisecond):
	case <-oks:
		t.Fatalf("poller did not wait")
	}
	tfd.closeWrite(t)
	if !<-oks {
		t.Fatalf("expected true")
	}
	tfd.get(t)
}

360
vendor/gopkg.in/fsnotify.v1/inotify_test.go generated vendored Normal file
View file

@ -0,0 +1,360 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
)
// TestInotifyCloseRightAway closes a watcher before its readEvents
// goroutine can block in unix.Read, then verifies it shut down cleanly.
func TestInotifyCloseRightAway(t *testing.T) {
	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher")
	}
	// Close immediately; it won't even reach the first unix.Read.
	w.Close()
	// Wait for the close to complete.
	<-time.After(50 * time.Millisecond)
	isWatcherReallyClosed(t, w)
}
// TestInotifyCloseSlightlyLater closes a watcher after giving its
// readEvents goroutine time to block in unix.Read, then verifies it
// shut down cleanly.
func TestInotifyCloseSlightlyLater(t *testing.T) {
	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher")
	}
	// Wait until readEvents has reached unix.Read, and Close.
	<-time.After(50 * time.Millisecond)
	w.Close()
	// Wait for the close to complete.
	<-time.After(50 * time.Millisecond)
	isWatcherReallyClosed(t, w)
}
// TestInotifyCloseSlightlyLaterWithWatch is like
// TestInotifyCloseSlightlyLater but with an active watch registered,
// so Close must also tear down the inotify watch descriptors.
func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher")
	}
	// Check the Add error (the original silently ignored it; a failed Add
	// would make the rest of the test meaningless).
	if err = w.Add(testDir); err != nil {
		t.Fatalf("Failed to add testDir: %v", err)
	}
	// Wait until readEvents has reached unix.Read, and Close.
	<-time.After(50 * time.Millisecond)
	w.Close()
	// Wait for the close to complete.
	<-time.After(50 * time.Millisecond)
	isWatcherReallyClosed(t, w)
}
// TestInotifyCloseAfterRead closes the watcher after readEvents has
// consumed at least one event, then verifies a clean shutdown.
func TestInotifyCloseAfterRead(t *testing.T) {
	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher")
	}
	err = w.Add(testDir)
	if err != nil {
		t.Fatalf("Failed to add .")
	}
	// Generate an event.
	os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
	// Wait for readEvents to read the event, then close the watcher.
	<-time.After(50 * time.Millisecond)
	w.Close()
	// Wait for the close to complete.
	<-time.After(50 * time.Millisecond)
	isWatcherReallyClosed(t, w)
}
// isWatcherReallyClosed asserts that both channels of w are closed,
// i.e. the readEvents goroutine has terminated. A receive from a closed
// channel succeeds immediately with ok==false; a blocked receive (the
// default branch) means the goroutine is still alive and holding the
// channel open.
func isWatcherReallyClosed(t *testing.T, w *Watcher) {
	select {
	case err, ok := <-w.Errors:
		if ok {
			t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
		}
	default:
		t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
	}

	select {
	case _, ok := <-w.Events:
		if ok {
			t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
		}
	default:
		t.Fatalf("w.Events would have blocked; readEvents is still alive!")
	}
}
// TestInotifyCloseCreate closes a watcher while its goroutine is blocked
// in unix.Read, then immediately creates a fresh watcher — exercising the
// case where the new inotify fd may reuse the number of the old one.
func TestInotifyCloseCreate(t *testing.T) {
	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher: %v", err)
	}
	defer w.Close()
	err = w.Add(testDir)
	if err != nil {
		t.Fatalf("Failed to add testDir: %v", err)
	}
	h, err := os.Create(filepath.Join(testDir, "testfile"))
	if err != nil {
		t.Fatalf("Failed to create file in testdir: %v", err)
	}
	h.Close()
	select {
	case _ = <-w.Events:
	case err := <-w.Errors:
		t.Fatalf("Error from watcher: %v", err)
	case <-time.After(50 * time.Millisecond):
		t.Fatalf("Took too long to wait for event")
	}

	// At this point, we've received one event, so the goroutine is ready.
	// It's also blocking on unix.Read.
	// Now we try to swap the file descriptor under its nose.
	w.Close()
	w, err = NewWatcher()
	// Check the error BEFORE deferring Close: the original deferred first,
	// which would dereference a nil watcher if NewWatcher failed.
	if err != nil {
		t.Fatalf("Failed to create second watcher: %v", err)
	}
	defer w.Close()

	<-time.After(50 * time.Millisecond)
	err = w.Add(testDir)
	if err != nil {
		t.Fatalf("Error adding testDir again: %v", err)
	}
}
// This test verifies the watcher can keep up with file creations/deletions
// when under load.
func TestInotifyStress(t *testing.T) {
	maxNumToCreate := 1000

	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)
	testFilePrefix := filepath.Join(testDir, "testfile")

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher: %v", err)
	}
	defer w.Close()

	err = w.Add(testDir)
	if err != nil {
		t.Fatalf("Failed to add testDir: %v", err)
	}

	doneChan := make(chan struct{})
	// The buffer ensures that the file generation goroutine is never blocked.
	errChan := make(chan error, 2*maxNumToCreate)

	// Producer goroutine: create maxNumToCreate files, then remove them all.
	// Failures are reported via errChan rather than failing from here.
	go func() {
		for i := 0; i < maxNumToCreate; i++ {
			testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
			handle, err := os.Create(testFile)
			if err != nil {
				errChan <- fmt.Errorf("Create failed: %v", err)
				continue
			}
			err = handle.Close()
			if err != nil {
				errChan <- fmt.Errorf("Close failed: %v", err)
				continue
			}
		}
		// If we delete a newly created file too quickly, inotify will skip the
		// create event and only send the delete event.
		time.Sleep(100 * time.Millisecond)
		for i := 0; i < maxNumToCreate; i++ {
			testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
			err = os.Remove(testFile)
			if err != nil {
				errChan <- fmt.Errorf("Remove failed: %v", err)
			}
		}
		close(doneChan)
	}()

	// Consumer loop: count Create/Remove events until the producer is done
	// or the 10-second deadline expires.
	creates := 0
	removes := 0
	finished := false
	after := time.After(10 * time.Second)
	for !finished {
		select {
		case <-after:
			t.Fatalf("Not done")
		case <-doneChan:
			finished = true
		case err := <-errChan:
			t.Fatalf("Got an error from file creator goroutine: %v", err)
		case err := <-w.Errors:
			t.Fatalf("Got an error from watcher: %v", err)
		case evt := <-w.Events:
			if !strings.HasPrefix(evt.Name, testFilePrefix) {
				t.Fatalf("Got an event for an unknown file: %s", evt.Name)
			}
			if evt.Op == Create {
				creates++
			}
			if evt.Op == Remove {
				removes++
			}
		}
	}

	// Drain remaining events from channels: keep reading until the default
	// branch has been hit 10 times in a row (count resets on every event).
	count := 0
	for count < 10 {
		select {
		case err := <-errChan:
			t.Fatalf("Got an error from file creator goroutine: %v", err)
		case err := <-w.Errors:
			t.Fatalf("Got an error from watcher: %v", err)
		case evt := <-w.Events:
			if !strings.HasPrefix(evt.Name, testFilePrefix) {
				t.Fatalf("Got an event for an unknown file: %s", evt.Name)
			}
			if evt.Op == Create {
				creates++
			}
			if evt.Op == Remove {
				removes++
			}
			count = 0
		default:
			count++
			// Give the watcher chances to fill the channels.
			time.Sleep(time.Millisecond)
		}
	}

	// Every create should eventually be matched by a remove; allow one
	// in-flight event of slack.
	if creates-removes > 1 || creates-removes < -1 {
		t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
	}
	if creates < 50 {
		t.Fatalf("Expected at least 50 creates, got %d", creates)
	}
}
// TestInotifyRemoveTwice verifies that calling Watcher.Remove twice on a
// watch whose file was deleted fails both times with the same error
// (i.e. the first failed Remove does not corrupt internal state).
func TestInotifyRemoveTwice(t *testing.T) {
	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)
	testFile := filepath.Join(testDir, "testfile")

	handle, err := os.Create(testFile)
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	handle.Close()

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher: %v", err)
	}
	defer w.Close()

	err = w.Add(testFile)
	if err != nil {
		t.Fatalf("Failed to add testFile: %v", err)
	}

	// Delete the file out from under the watch.
	err = os.Remove(testFile)
	if err != nil {
		t.Fatalf("Failed to remove testFile: %v", err)
	}

	err = w.Remove(testFile)
	if err == nil {
		t.Fatalf("no error on removing invalid file")
	}
	s1 := fmt.Sprintf("%s", err)

	err = w.Remove(testFile)
	if err == nil {
		t.Fatalf("no error on removing invalid file")
	}
	s2 := fmt.Sprintf("%s", err)

	if s1 != s2 {
		t.Fatalf("receive different error - %s / %s", s1, s2)
	}
}
// TestInotifyInnerMapLength verifies that when a watched file is deleted,
// the kernel's IN_IGNORED notification causes the watcher to purge its
// internal watches/paths maps.
func TestInotifyInnerMapLength(t *testing.T) {
	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)
	testFile := filepath.Join(testDir, "testfile")

	handle, err := os.Create(testFile)
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	handle.Close()

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher: %v", err)
	}
	defer w.Close()

	err = w.Add(testFile)
	if err != nil {
		t.Fatalf("Failed to add testFile: %v", err)
	}
	go func() {
		for err := range w.Errors {
			// t.Fatalf is not allowed from a non-test goroutine (it only
			// terminates the calling goroutine); use Errorf instead.
			t.Errorf("error received: %s", err)
		}
	}()

	err = os.Remove(testFile)
	if err != nil {
		t.Fatalf("Failed to remove testFile: %v", err)
	}
	_ = <-w.Events                      // consume Remove event
	<-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated

	w.mu.Lock()
	defer w.mu.Unlock()
	if len(w.watches) != 0 {
		t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
	}
	if len(w.paths) != 0 {
		t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
	}
}

147
vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go generated vendored Normal file
View file

@ -0,0 +1,147 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fsnotify
import (
"os"
"path/filepath"
"testing"
"time"
"golang.org/x/sys/unix"
)
// testExchangedataForWatcher tests the watcher with the exchangedata operation on OS X.
//
// This is widely used for atomic saves on OS X, e.g. TextMate and in Apple's NSDocument.
//
// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
func testExchangedataForWatcher(t *testing.T, watchDir bool) {
	// Create directory to watch
	testDir1 := tempMkdir(t)

	// For the intermediate file
	testDir2 := tempMkdir(t)

	defer os.RemoveAll(testDir1)
	defer os.RemoveAll(testDir2)

	resolvedFilename := "TestFsnotifyEvents.file"

	// TextMate does:
	//
	// 1. exchangedata (intermediate, resolved)
	// 2. unlink intermediate
	//
	// Let's try to simulate that:
	resolved := filepath.Join(testDir1, resolvedFilename)
	intermediate := filepath.Join(testDir2, resolvedFilename+"~")

	// Make sure we create the file before we start watching
	createAndSyncFile(t, resolved)

	watcher := newWatcher(t)

	// Test both variants in isolation
	if watchDir {
		addWatch(t, watcher, testDir1)
	} else {
		addWatch(t, watcher, resolved)
	}

	// Receive errors on the error channel on a separate goroutine
	// NOTE(review): t.Fatalf from a non-test goroutine only kills that
	// goroutine; consider t.Errorf here — confirm before changing.
	go func() {
		for err := range watcher.Errors {
			t.Fatalf("error received: %s", err)
		}
	}()

	// Receive events on the event channel on a separate goroutine
	eventstream := watcher.Events
	var removeReceived counter
	var createReceived counter

	done := make(chan bool)

	go func() {
		for event := range eventstream {
			// Only count relevant events
			if event.Name == filepath.Clean(resolved) {
				if event.Op&Remove == Remove {
					removeReceived.increment()
				}
				if event.Op&Create == Create {
					createReceived.increment()
				}
			}
			t.Logf("event received: %s", event)
		}
		// Event channel closed => watcher shut down.
		done <- true
	}()

	// Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
	for i := 1; i <= 3; i++ {
		// The intermediate file is created in a folder outside the watcher
		createAndSyncFile(t, intermediate)

		// 1. Swap
		if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
			t.Fatalf("[%d] exchangedata failed: %s", i, err)
		}

		time.Sleep(50 * time.Millisecond)

		// 2. Delete the intermediate file
		err := os.Remove(intermediate)
		if err != nil {
			t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
		}

		time.Sleep(50 * time.Millisecond)
	}

	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
	time.Sleep(500 * time.Millisecond)

	// The events will be (CHMOD + REMOVE + CREATE) X 2. Let's focus on the last two:
	if removeReceived.value() < 3 {
		t.Fatal("fsnotify remove events have not been received after 500 ms")
	}
	if createReceived.value() < 3 {
		t.Fatal("fsnotify create events have not been received after 500 ms")
	}

	watcher.Close()
	t.Log("waiting for the event channel to become closed...")
	select {
	case <-done:
		t.Log("event channel closed")
	case <-time.After(2 * time.Second):
		t.Fatal("event stream was not closed after 2 seconds")
	}
}
// TestExchangedataInWatchedDir tests the exchangedata operation on a file
// inside a watched directory.
func TestExchangedataInWatchedDir(t *testing.T) {
	testExchangedataForWatcher(t, true)
}
// TestExchangedataInWatchedFile tests the exchangedata operation on a
// watched file itself. (The original comment mis-named this function.)
func TestExchangedataInWatchedFile(t *testing.T) {
	testExchangedataForWatcher(t, false)
}
// createAndSyncFile creates (or truncates) the file at path, flushes it to
// stable storage, and closes it; any creation error fails the test.
// The parameter was renamed from `filepath` to `path` because the original
// name shadowed the path/filepath package (call sites are unaffected).
func createAndSyncFile(t *testing.T, path string) {
	f1, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		t.Fatalf("creating %s failed: %s", path, err)
	}
	f1.Sync()
	f1.Close()
}

1237
vendor/gopkg.in/fsnotify.v1/integration_test.go generated vendored Normal file

File diff suppressed because it is too large Load diff

28
vendor/gopkg.in/gcfg.v1/LICENSE generated vendored
View file

@ -1,28 +0,0 @@
Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

4
vendor/gopkg.in/gcfg.v1/README generated vendored
View file

@ -1,4 +0,0 @@
Gcfg reads INI-style configuration files into Go structs;
supports user-defined types and subsections.
Package docs: https://godoc.org/gopkg.in/gcfg.v1

145
vendor/gopkg.in/gcfg.v1/doc.go generated vendored
View file

@ -1,145 +0,0 @@
// Package gcfg reads "INI-style" text-based configuration files with
// "name=value" pairs grouped into sections (gcfg files).
//
// This package is still a work in progress; see the sections below for planned
// changes.
//
// Syntax
//
// The syntax is based on that used by git config:
// http://git-scm.com/docs/git-config#_syntax .
// There are some (planned) differences compared to the git config format:
// - improve data portability:
// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
// - include and "path" type is not supported
// (path type may be implementable as a user-defined type)
// - internationalization
// - section and variable names can contain unicode letters, unicode digits
// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
// (U+002D), starting with a unicode letter
// - disallow potentially ambiguous or misleading definitions:
// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
// - `[sec ""]` is not allowed
// - use `[sec]` for section name "sec" and empty subsection name
// - (planned) within a single file, definitions must be contiguous for each:
// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
//
// Data structure
//
// The functions in this package read values into a user-defined struct.
// Each section corresponds to a struct field in the config struct, and each
// variable in a section corresponds to a data field in the section struct.
// The mapping of each section or variable name to fields is done either based
// on the "gcfg" struct tag or by matching the name of the section or variable,
// ignoring case. In the latter case, hyphens '-' in section and variable names
// correspond to underscores '_' in field names.
// Fields must be exported; to use a section or variable name starting with a
// letter that is neither upper- or lower-case, prefix the field name with 'X'.
// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
//
// For sections with subsections, the corresponding field in config must be a
// map, rather than a struct, with string keys and pointer-to-struct values.
// Values for subsection variables are stored in the map with the subsection
// name used as the map key.
// (Note that unlike section and variable names, subsection names are case
// sensitive.)
// When using a map, and there is a section with the same section name but
// without a subsection name, its values are stored with the empty string used
// as the key.
// It is possible to provide default values for subsections in the section
// "default-<sectionname>" (or by setting values in the corresponding struct
// field "Default_<sectionname>").
//
// The functions in this package panic if config is not a pointer to a struct,
// or when a field is not of a suitable type (either a struct or a map with
// string keys and pointer-to-struct values).
//
// Parsing of values
//
// The section structs in the config struct may contain single-valued or
// multi-valued variables. Variables of unnamed slice type (that is, a type
// starting with `[]`) are treated as multi-value; all others (including named
// slice types) are treated as single-valued variables.
//
// Single-valued variables are handled based on the type as follows.
// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
// and if necessary, a new instance is allocated.
//
// For types implementing the encoding.TextUnmarshaler interface, the
// UnmarshalText method is used to set the value. Implementing this method is
// the recommended way for parsing user-defined types.
//
// For fields of string kind, the value string is assigned to the field, after
// unquoting and unescaping as needed.
// For fields of bool kind, the field is set to true if the value is "true",
// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
// "0", ignoring case. In addition, single-valued bool fields can be specified
// with a "blank" value (variable name without equals sign and value); in such
// case the value is set to true.
//
// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
// unintuitively handling zero-padded numbers as octal.) Other types having
// [u]int* as the underlying type, such as os.FileMode and uintptr allow
// decimal, hexadecimal, or octal values.
// Parsing mode for integer types can be overridden using the struct tag option
// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
// (each standing for decimal, hexadecimal, and octal, respectively.)
//
// All other types are parsed using fmt.Sscanf with the "%v" verb.
//
// For multi-valued variables, each individual value is parsed as above and
// appended to the slice. If the first value is specified as a "blank" value
// (variable name without equals sign and value), a new slice is allocated;
// that is any values previously set in the slice will be ignored.
//
// The types subpackage provides helpers for parsing "enum-like" and integer
// types.
//
// Error handling
//
// There are 3 types of errors:
//
// - programmer errors / panics:
// - invalid configuration structure
// - data errors:
// - fatal errors:
// - invalid configuration syntax
// - warnings:
// - data that doesn't belong to any part of the config structure
//
// Programmer errors trigger panics. These should be fixed by the programmer
// before releasing code that uses gcfg.
//
// Data errors cause gcfg to return a non-nil error value. This includes the
// case when there are extra unknown key-value definitions in the configuration
// data (extra data).
// However, in some occasions it is desirable to be able to proceed in
// situations when the only data error is that of extra data.
// These errors are handled at a different (warning) priority and can be
// filtered out programmatically. To ignore extra data warnings, wrap the
// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
//
// TODO
//
// The following is a list of changes under consideration:
// - documentation
// - self-contained syntax documentation
// - more practical examples
// - move TODOs to issue tracker (eventually)
// - syntax
// - reconsider valid escape sequences
// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
// - reading / parsing gcfg files
// - define internal representation structure
// - support multiple inputs (readers, strings, files)
// - support declaring encoding (?)
// - support varying fields sets for subsections (?)
// - writing gcfg files
// - error handling
// - make error context accessible programmatically?
// - limit input size?
//
package gcfg

41
vendor/gopkg.in/gcfg.v1/errors.go generated vendored
View file

@ -1,41 +0,0 @@
package gcfg
import (
"gopkg.in/warnings.v0"
)
// FatalOnly filters the results of a Read*Into invocation and returns only
// fatal errors. That is, errors (warnings) indicating data for unknown
// sections / variables is ignored. Example invocation:
//
//	err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
//	if err != nil {
//	    ...
//
func FatalOnly(err error) error {
	// Delegates to the warnings package, which drops entries that
	// isFatal (below) classifies as non-fatal.
	return warnings.FatalOnly(err)
}
// isFatal reports whether err is a hard failure. Only extraData values
// (unknown section/variable warnings) are considered non-fatal.
func isFatal(err error) bool {
	switch err.(type) {
	case extraData:
		return false
	default:
		return true
	}
}
// extraData is the warning reported when the configuration contains data
// that has no corresponding field in the target struct. subsection and
// variable are optional (nil when not applicable).
type extraData struct {
	section    string
	subsection *string
	variable   *string
}

// Error describes the location of the unmatched data, mentioning the
// subsection and variable only when they are set.
func (e extraData) Error() string {
	msg := `can't store data at section "` + e.section + `"`
	if sub := e.subsection; sub != nil {
		msg += `, subsection "` + *sub + `"`
	}
	if v := e.variable; v != nil {
		msg += `, variable "` + *v + `"`
	}
	return msg
}

// Compile-time check that extraData satisfies the error interface.
var _ error = extraData{}

7
vendor/gopkg.in/gcfg.v1/go1_0.go generated vendored
View file

@ -1,7 +0,0 @@
// +build !go1.2
package gcfg
// textUnmarshaler mirrors encoding.TextUnmarshaler for Go versions before
// 1.2, where the encoding package does not exist (see the !go1.2 build tag).
type textUnmarshaler interface {
	UnmarshalText(text []byte) error
}

9
vendor/gopkg.in/gcfg.v1/go1_2.go generated vendored
View file

@ -1,9 +0,0 @@
// +build go1.2
package gcfg
import (
"encoding"
)
type textUnmarshaler encoding.TextUnmarshaler

238
vendor/gopkg.in/gcfg.v1/read.go generated vendored
View file

@ -1,238 +0,0 @@
package gcfg
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"gopkg.in/gcfg.v1/scanner"
"gopkg.in/gcfg.v1/token"
"gopkg.in/warnings.v0"
)
// unescape maps the character after a backslash to its replacement.
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'}

// unquote interprets s as a (possibly partially) quoted gcfg value:
// double quotes toggle quoting, backslash escapes use the unescape table,
// and a backslash-newline outside quotes is a line continuation.
// Invalid literals panic; the scanner is expected to have rejected them
// already, so no error is returned.
func unquote(s string) string {
	out := make([]rune, 0, len(s))
	inQuotes := false
	escaped := false
	for _, r := range s {
		if escaped {
			if repl, ok := unescape[r]; ok {
				out = append(out, repl)
				escaped = false
				continue
			}
			if !inQuotes && r == '\n' {
				// Escaped newline outside quotes: swallow it.
				escaped = false
				continue
			}
			panic("invalid escape sequence")
		}
		switch r {
		case '"':
			inQuotes = !inQuotes
		case '\\':
			escaped = true
		default:
			out = append(out, r)
		}
	}
	if inQuotes {
		panic("missing end quote")
	}
	if escaped {
		panic("invalid escape sequence")
	}
	return string(out)
}
// readIntoPass runs one parse pass over src, storing values into config
// via set(). Non-fatal problems are routed through the warnings collector
// c; a non-nil return means a fatal error (or a collector abort).
// subsectPass is forwarded to set() — it distinguishes the two passes made
// by readInto (presumably plain sections vs. subsections; confirm in set).
func readIntoPass(c *warnings.Collector, config interface{}, fset *token.FileSet,
	file *token.File, src []byte, subsectPass bool) error {
	//
	var s scanner.Scanner
	var errs scanner.ErrorList
	s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
	sect, sectsub := "", ""
	pos, tok, lit := s.Scan()
	errfn := func(msg string) error {
		return fmt.Errorf("%s: %s", fset.Position(pos), msg)
	}
	// Token loop: each iteration handles one construct — EOL/comment,
	// a '[section "sub"]' header, or a 'name = value' variable line.
	for {
		if errs.Len() > 0 {
			if err := c.Collect(errs.Err()); err != nil {
				return err
			}
		}
		switch tok {
		case token.EOF:
			return nil
		case token.EOL, token.COMMENT:
			pos, tok, lit = s.Scan()
		case token.LBRACK:
			// Section header: '[' IDENT [STRING] ']'
			pos, tok, lit = s.Scan()
			if errs.Len() > 0 {
				if err := c.Collect(errs.Err()); err != nil {
					return err
				}
			}
			if tok != token.IDENT {
				if err := c.Collect(errfn("expected section name")); err != nil {
					return err
				}
			}
			sect, sectsub = lit, ""
			pos, tok, lit = s.Scan()
			if errs.Len() > 0 {
				if err := c.Collect(errs.Err()); err != nil {
					return err
				}
			}
			if tok == token.STRING {
				// Optional quoted subsection name.
				sectsub = unquote(lit)
				if sectsub == "" {
					if err := c.Collect(errfn("empty subsection name")); err != nil {
						return err
					}
				}
				pos, tok, lit = s.Scan()
				if errs.Len() > 0 {
					if err := c.Collect(errs.Err()); err != nil {
						return err
					}
				}
			}
			if tok != token.RBRACK {
				if sectsub == "" {
					if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil {
						return err
					}
				}
				if err := c.Collect(errfn("expected right bracket")); err != nil {
					return err
				}
			}
			pos, tok, lit = s.Scan()
			if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
				if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
					return err
				}
			}
			// If a section/subsection header was found, ensure a
			// container object is created, even if there are no
			// variables further down.
			err := c.Collect(set(c, config, sect, sectsub, "", true, "", subsectPass))
			if err != nil {
				return err
			}
		case token.IDENT:
			// Variable line: IDENT ['=' STRING]. A bare IDENT ("blank"
			// value) is allowed and signalled to set() via blank=true.
			if sect == "" {
				if err := c.Collect(errfn("expected section header")); err != nil {
					return err
				}
			}
			n := lit
			pos, tok, lit = s.Scan()
			if errs.Len() > 0 {
				return errs.Err()
			}
			blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
			if !blank {
				if tok != token.ASSIGN {
					if err := c.Collect(errfn("expected '='")); err != nil {
						return err
					}
				}
				pos, tok, lit = s.Scan()
				if errs.Len() > 0 {
					if err := c.Collect(errs.Err()); err != nil {
						return err
					}
				}
				if tok != token.STRING {
					if err := c.Collect(errfn("expected value")); err != nil {
						return err
					}
				}
				v = unquote(lit)
				pos, tok, lit = s.Scan()
				if errs.Len() > 0 {
					if err := c.Collect(errs.Err()); err != nil {
						return err
					}
				}
				if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
					if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
						return err
					}
				}
			}
			err := set(c, config, sect, sectsub, n, blank, v, subsectPass)
			if err != nil {
				return err
			}
		default:
			if sect == "" {
				if err := c.Collect(errfn("expected section header")); err != nil {
					return err
				}
			}
			if err := c.Collect(errfn("expected section header or variable declaration")); err != nil {
				return err
			}
		}
	}
	// The for loop only exits via return; this is unreachable by design.
	panic("never reached")
}
// readInto parses src twice with a shared warnings collector: once for plain
// struct sections and once for subsection (map) sections, then reports any
// accumulated non-fatal warnings via the collector.
func readInto(config interface{}, fset *token.FileSet, file *token.File,
	src []byte) error {
	//
	c := warnings.NewCollector(isFatal)
	for _, subsectPass := range []bool{false, true} {
		if err := readIntoPass(c, config, fset, file, src, subsectPass); err != nil {
			return err
		}
	}
	return c.Done()
}
// ReadInto reads gcfg formatted data from reader and sets the values into the
// corresponding fields in config.
func ReadInto(config interface{}, reader io.Reader) error {
	src, err := ioutil.ReadAll(reader)
	if err != nil {
		return err
	}
	// Register the (nameless) input with a fresh file set so the parser can
	// report positions.
	fset := token.NewFileSet()
	return readInto(config, fset, fset.AddFile("", fset.Base(), len(src)), src)
}
// ReadStringInto reads gcfg formatted data from str and sets the values into
// the corresponding fields in config.
func ReadStringInto(config interface{}, str string) error {
	return ReadInto(config, strings.NewReader(str))
}
// ReadFileInto reads gcfg formatted data from the file filename and sets the
// values into the corresponding fields in config.
func ReadFileInto(config interface{}, filename string) error {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
src, err := ioutil.ReadAll(f)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile(filename, fset.Base(), len(src))
return readInto(config, fset, file, src)
}

View file

@ -1,121 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package scanner
import (
"fmt"
"io"
"sort"
)
import (
"gopkg.in/gcfg.v1/token"
)
// In an ErrorList, an error is represented by an *Error.
// The position Pos, if valid, points to the beginning of
// the offending token, and the error condition is described
// by Msg.
//
type Error struct {
	Pos token.Position
	Msg string
}

// Error implements the error interface.
func (e Error) Error() string {
	// Omit the position prefix only when it carries no information at all
	// (no filename and an invalid line), avoiding "<unknown position>".
	// TODO(gri) reconsider the semantics of Position.IsValid
	if e.Pos.Filename == "" && !e.Pos.IsValid() {
		return e.Msg
	}
	return e.Pos.String() + ": " + e.Msg
}
// ErrorList is a list of *Errors.
// The zero value for an ErrorList is an empty ErrorList ready to use.
//
type ErrorList []*Error

// Add adds an Error with given position and error message to an ErrorList.
func (p *ErrorList) Add(pos token.Position, msg string) {
	*p = append(*p, &Error{Pos: pos, Msg: msg})
}
// Reset resets an ErrorList to no errors.
func (p *ErrorList) Reset() { *p = (*p)[:0] }

// ErrorList implements the sort Interface.
func (p ErrorList) Len() int      { return len(p) }
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

func (p ErrorList) Less(i, j int) bool {
	// Order by filename first, then by offset within the same file.
	e, f := &p[i].Pos, &p[j].Pos
	if e.Filename != f.Filename {
		return e.Filename < f.Filename
	}
	return e.Offset < f.Offset
}

// Sort sorts an ErrorList. *Error entries are sorted by position,
// other errors are sorted by error message, and before any *Error
// entry.
//
func (p ErrorList) Sort() {
	sort.Sort(p)
}
// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
func (p *ErrorList) RemoveMultiples() {
	sort.Sort(p)
	var last token.Position // initial last.Line is != any legal error line
	// Compact the (now sorted) list in place: keep an error only when it is
	// the first one seen for its (filename, line) pair.
	i := 0
	for _, e := range *p {
		if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
			last = e.Pos
			(*p)[i] = e
			i++
		}
	}
	// Truncate to the surviving entries; the backing array is reused.
	(*p) = (*p)[0:i]
}
// An ErrorList implements the error interface.
func (p ErrorList) Error() string {
	// Summarize: first error plus a count of any remaining ones.
	n := len(p)
	if n == 0 {
		return "no errors"
	}
	if n == 1 {
		return p[0].Error()
	}
	return fmt.Sprintf("%s (and %d more errors)", p[0], n-1)
}

// Err returns an error equivalent to this error list.
// If the list is empty, Err returns nil.
func (p ErrorList) Err() error {
	if len(p) > 0 {
		return p
	}
	return nil
}
// PrintError is a utility function that prints a list of errors to w,
// one error per line, if the err parameter is an ErrorList. Otherwise
// it prints the err string.
//
func PrintError(w io.Writer, err error) {
	list, ok := err.(ErrorList)
	if !ok {
		if err != nil {
			fmt.Fprintf(w, "%s\n", err)
		}
		return
	}
	for _, e := range list {
		fmt.Fprintf(w, "%s\n", e)
	}
}

View file

@ -1,342 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package scanner implements a scanner for gcfg configuration text.
// It takes a []byte as source which can then be tokenized
// through repeated calls to the Scan method.
//
// Note that the API for the scanner package may change to accommodate new
// features or implementation changes in gcfg.
//
package scanner
import (
"fmt"
"path/filepath"
"unicode"
"unicode/utf8"
)
import (
"gopkg.in/gcfg.v1/token"
)
// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
// encountered and a handler was installed, the handler is called with a
// position and an error message. The position points to the beginning of
// the offending token.
//
type ErrorHandler func(pos token.Position, msg string)

// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
//
type Scanner struct {
	// immutable state
	file *token.File  // source file handle
	dir  string       // directory portion of file.Name()
	src  []byte       // source
	err  ErrorHandler // error reporting; or nil
	mode Mode         // scanning mode

	// scanning state
	ch         rune // current character; -1 means end-of-file
	offset     int  // character offset
	rdOffset   int  // reading offset (position after current character)
	lineOffset int  // current line offset
	nextVal    bool // next token is expected to be a value (set after '=')

	// public state - ok to modify
	ErrorCount int // number of errors encountered
}
// Read the next Unicode char into s.ch.
// s.ch < 0 means end-of-file.
//
func (s *Scanner) next() {
	if s.rdOffset < len(s.src) {
		s.offset = s.rdOffset
		// If the character just consumed was a newline, record the start of
		// the new line in the file's line table.
		if s.ch == '\n' {
			s.lineOffset = s.offset
			s.file.AddLine(s.offset)
		}
		r, w := rune(s.src[s.rdOffset]), 1
		switch {
		case r == 0:
			s.error(s.offset, "illegal character NUL")
		case r >= 0x80:
			// not ASCII
			r, w = utf8.DecodeRune(s.src[s.rdOffset:])
			if r == utf8.RuneError && w == 1 {
				s.error(s.offset, "illegal UTF-8 encoding")
			}
		}
		s.rdOffset += w
		s.ch = r
	} else {
		// End of input: keep the line table up to date if the final
		// character was a newline, then mark EOF.
		s.offset = len(s.src)
		if s.ch == '\n' {
			s.lineOffset = s.offset
			s.file.AddLine(s.offset)
		}
		s.ch = -1 // eof
	}
}
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint

const (
	// ScanComments directs Scan to return comments as COMMENT tokens
	// instead of silently skipping them.
	ScanComments Mode = 1 << iota // return comments as COMMENT tokens
)
// Init prepares the scanner s to tokenize the text src by setting the
// scanner at the beginning of src. The scanner uses the file set file
// for position information and it adds line information for each line.
// It is ok to re-use the same file when re-scanning the same file as
// line information which is already present is ignored. Init causes a
// panic if the file size does not match the src size.
//
// Calls to Scan will invoke the error handler err if they encounter a
// syntax error and err is not nil. Also, for each error encountered,
// the Scanner field ErrorCount is incremented by one. The mode parameter
// determines how comments are handled.
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
	// Explicitly initialize all fields since a scanner may be reused.
	if file.Size() != len(src) {
		panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
	}
	s.file = file
	s.dir, _ = filepath.Split(file.Name())
	s.src = src
	s.err = err
	s.mode = mode

	// ' ' is a harmless sentinel: the first call to next() overwrites it.
	s.ch = ' '
	s.offset = 0
	s.rdOffset = 0
	s.lineOffset = 0
	s.ErrorCount = 0
	s.nextVal = false

	// Prime s.ch with the first character (may already report an error).
	s.next()
}
// error reports msg at file offset offs to the installed handler, if any,
// and bumps the public error counter.
func (s *Scanner) error(offs int, msg string) {
	if h := s.err; h != nil {
		h(s.file.Position(s.file.Pos(offs)), msg)
	}
	s.ErrorCount++
}
// scanComment consumes a comment through end-of-line and returns its text,
// including the leading [;#] marker.
func (s *Scanner) scanComment() string {
	// initial [;#] already consumed
	start := s.offset - 1 // position of initial [;#]
	for s.ch >= 0 && s.ch != '\n' {
		s.next()
	}
	return string(s.src[start:s.offset])
}
// isLetter reports whether ch may start/continue an identifier: an ASCII
// letter, or any non-ASCII Unicode letter.
func isLetter(ch rune) bool {
	switch {
	case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z':
		return true
	default:
		return ch >= 0x80 && unicode.IsLetter(ch)
	}
}

// isDigit reports whether ch is an ASCII digit or a non-ASCII Unicode digit.
func isDigit(ch rune) bool {
	if '0' <= ch && ch <= '9' {
		return true
	}
	return ch >= 0x80 && unicode.IsDigit(ch)
}
// scanIdentifier consumes a run of letters, digits, and '-' and returns it.
func (s *Scanner) scanIdentifier() string {
	start := s.offset
	for {
		if !isLetter(s.ch) && !isDigit(s.ch) && s.ch != '-' {
			break
		}
		s.next()
	}
	return string(s.src[start:s.offset])
}
// scanEscape validates the character following a backslash. Inside value
// strings (val == true) the additional escapes \n and \t are legal; in all
// contexts \\ and \" are legal. Anything else is reported as an error.
func (s *Scanner) scanEscape(val bool) {
	offs := s.offset
	ch := s.ch
	s.next() // always make progress
	legal := ch == '\\' || ch == '"' || (val && (ch == 'n' || ch == 't'))
	if !legal {
		s.error(offs, "unknown escape sequence")
	}
}
// scanString consumes a quoted string (used for subsection names) and returns
// its raw text including both quotes. Newline or EOF inside the string is
// reported as "string not terminated".
func (s *Scanner) scanString() string {
	// '"' opening already consumed
	offs := s.offset - 1

	for s.ch != '"' {
		ch := s.ch
		s.next()
		if ch == '\n' || ch < 0 {
			s.error(offs, "string not terminated")
			break
		}
		if ch == '\\' {
			// Only \\ and \" are legal here (val == false).
			s.scanEscape(false)
		}
	}

	// Consume the closing '"' (or, after an unterminated-string error, the
	// character following the break; next() is a no-op at EOF).
	s.next()

	return string(s.src[offs:s.offset])
}
// stripCR returns a copy of b with all carriage-return bytes removed.
// The input slice is not modified.
func stripCR(b []byte) []byte {
	out := make([]byte, 0, len(b))
	for _, ch := range b {
		if ch != '\r' {
			out = append(out, ch)
		}
	}
	return out
}
// scanValString consumes the value text after '=', which may mix unquoted and
// quoted segments, and returns it with trailing unquoted whitespace trimmed
// and any CR bytes removed. A bare '\' at end of line continues the value on
// the next line.
func (s *Scanner) scanValString() string {
	offs := s.offset

	hasCR := false
	end := offs // offset just past the last byte to keep (trims trailing ws)
	inQuote := false
loop:
	// Outside quotes the value ends at newline, EOF, or a comment marker;
	// inside quotes only EOF or a raw newline (an error) ends it.
	for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
		ch := s.ch
		s.next()
		switch {
		case inQuote && ch == '\\':
			s.scanEscape(true)
		case !inQuote && ch == '\\':
			// Line continuation: '\' must be the last character on the line
			// (an optional CR before the LF is tolerated and stripped).
			if s.ch == '\r' {
				hasCR = true
				s.next()
			}
			if s.ch != '\n' {
				s.error(offs, "unquoted '\\' must be followed by new line")
				break loop
			}
			s.next()
		case ch == '"':
			inQuote = !inQuote
		case ch == '\r':
			hasCR = true
		case ch < 0 || inQuote && ch == '\n':
			s.error(offs, "string not terminated")
			break loop
		}
		if inQuote || !isWhiteSpace(ch) {
			end = s.offset
		}
	}

	lit := s.src[offs:end]
	if hasCR {
		lit = stripCR(lit)
	}

	return string(lit)
}
// isWhiteSpace reports whether ch is horizontal whitespace or a carriage
// return; newline is deliberately excluded (it is a token in gcfg).
func isWhiteSpace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\r':
		return true
	}
	return false
}
// skipWhitespace advances the scanner past any run of whitespace characters.
func (s *Scanner) skipWhitespace() {
	for {
		if !isWhiteSpace(s.ch) {
			return
		}
		s.next()
	}
}
// Scan scans the next token and returns the token position, the token,
// and its literal string if applicable. The source end is indicated by
// token.EOF.
//
// If the returned token is a literal (token.IDENT, token.STRING) or
// token.COMMENT, the literal string has the corresponding value.
//
// If the returned token is token.ILLEGAL, the literal string is the
// offending character.
//
// In all other cases, Scan returns an empty literal string.
//
// For more tolerant parsing, Scan will return a valid token if
// possible even if a syntax error was encountered. Thus, even
// if the resulting token sequence contains no illegal tokens,
// a client may not assume that no error occurred. Instead it
// must check the scanner's ErrorCount or the number of calls
// of the error handler, if there was one installed.
//
// Scan adds line information to the file added to the file
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
	s.skipWhitespace()

	// current token start
	pos = s.file.Pos(s.offset)

	// determine token value
	switch ch := s.ch; {
	case s.nextVal:
		// After '=' the remainder of the line is a value string, regardless
		// of its first character.
		lit = s.scanValString()
		tok = token.STRING
		s.nextVal = false
	case isLetter(ch):
		lit = s.scanIdentifier()
		tok = token.IDENT
	default:
		s.next() // always make progress
		switch ch {
		case -1:
			tok = token.EOF
		case '\n':
			tok = token.EOL
		case '"':
			tok = token.STRING
			lit = s.scanString()
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case ';', '#':
			// comment
			lit = s.scanComment()
			if s.mode&ScanComments == 0 {
				// skip comment
				goto scanAgain
			}
			tok = token.COMMENT
		case '=':
			tok = token.ASSIGN
			// The next token is the value; see the s.nextVal case above.
			s.nextVal = true
		default:
			s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
			tok = token.ILLEGAL
			lit = string(ch)
		}
	}

	return
}

333
vendor/gopkg.in/gcfg.v1/set.go generated vendored
View file

@ -1,333 +0,0 @@
package gcfg
import (
"bytes"
"encoding/gob"
"fmt"
"math/big"
"reflect"
"strings"
"unicode"
"unicode/utf8"
"gopkg.in/gcfg.v1/types"
"gopkg.in/warnings.v0"
)
// tag holds the parsed contents of a `gcfg:"..."` struct tag: an optional
// variable name override and an optional integer-parsing mode ("int=...").
type tag struct {
	ident   string
	intMode string
}

// newTag parses a comma-separated gcfg tag string.
func newTag(ts string) tag {
	parts := strings.Split(ts, ",")
	t := tag{ident: parts[0]}
	for _, opt := range parts[1:] {
		if v := strings.TrimPrefix(opt, "int="); v != opt {
			t.intMode = v
		}
	}
	return t
}
// fieldFold finds the settable struct field of v that corresponds to the
// config variable/section called name, matching case-insensitively and
// treating '-' in the config name as '_' in the field name. A field whose
// gcfg tag declares an ident is matched against that ident instead. Returns
// a zero Value (and zero tag) when no field matches.
func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
	var n string
	r0, _ := utf8.DecodeRuneInString(name)
	// A name starting with a caseless letter (neither upper nor lower case)
	// cannot itself be exported, so match it against an "X"-prefixed field.
	if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
		n = "X"
	}
	n += strings.Replace(name, "-", "_", -1)
	f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
		// Only settable (exported, addressable) fields are candidates.
		if !v.FieldByName(fieldName).CanSet() {
			return false
		}
		f, _ := v.Type().FieldByName(fieldName)
		t := newTag(f.Tag.Get("gcfg"))
		if t.ident != "" {
			return strings.EqualFold(t.ident, name)
		}
		return strings.EqualFold(n, fieldName)
	})
	if !ok {
		return reflect.Value{}, tag{}
	}
	return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
}
// setter attempts to assign val (or, for blank, the type's "set" semantics)
// into the value pointed to by destp; it returns errUnsupportedType when it
// does not handle destp's type so the next setter can be tried.
type setter func(destp interface{}, blank bool, val string, t tag) error

// Sentinel errors used to drive the setter chain in set().
var errUnsupportedType = fmt.Errorf("unsupported type")
var errBlankUnsupported = fmt.Errorf("blank value not supported for type")

// setters are tried in order until one does not return errUnsupportedType.
var setters = []setter{
	typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
}
// textUnmarshalerSetter assigns via encoding.TextUnmarshaler when d
// implements it; blank values are not supported for such types.
func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
	dtu, ok := d.(textUnmarshaler)
	switch {
	case !ok:
		return errUnsupportedType
	case blank:
		return errBlankUnsupported
	}
	return dtu.UnmarshalText([]byte(val))
}
// boolSetter parses val as a gcfg boolean; a blank value means true.
func boolSetter(d interface{}, blank bool, val string, t tag) error {
	if blank {
		reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
		return nil
	}
	b, err := types.ParseBool(val)
	if err != nil {
		return err
	}
	reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
	return nil
}
// intMode translates a tag mode string (any of the letters d/h/o, either
// case) into the corresponding types.IntMode bit set.
func intMode(mode string) types.IntMode {
	var m types.IntMode
	for _, spec := range []struct {
		chars string
		bit   types.IntMode
	}{
		{"dD", types.Dec},
		{"hH", types.Hex},
		{"oO", types.Oct},
	} {
		if strings.ContainsAny(mode, spec.chars) {
			m |= spec.bit
		}
	}
	return m
}
// typeModes maps integer types to the parse modes allowed when the gcfg tag
// specifies none; fixed-width and big.Int types default to dec/hex only.
var typeModes = map[reflect.Type]types.IntMode{
	reflect.TypeOf(int(0)):    types.Dec | types.Hex,
	reflect.TypeOf(int8(0)):   types.Dec | types.Hex,
	reflect.TypeOf(int16(0)):  types.Dec | types.Hex,
	reflect.TypeOf(int32(0)):  types.Dec | types.Hex,
	reflect.TypeOf(int64(0)):  types.Dec | types.Hex,
	reflect.TypeOf(uint(0)):   types.Dec | types.Hex,
	reflect.TypeOf(uint8(0)):  types.Dec | types.Hex,
	reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
	reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
	reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
	// use default mode (allow dec/hex/oct) for uintptr type
	reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
}

// intModeDefault returns the parse modes for t: the table entry if present,
// otherwise dec, hex, and oct are all allowed.
func intModeDefault(t reflect.Type) types.IntMode {
	m, ok := typeModes[t]
	if !ok {
		m = types.Dec | types.Hex | types.Oct
	}
	return m
}
// intSetter parses val as an integer, honoring an "int=..." mode from the
// tag and falling back to the type's default modes otherwise.
func intSetter(d interface{}, blank bool, val string, t tag) error {
	if blank {
		return errBlankUnsupported
	}
	if m := intMode(t.intMode); m != 0 {
		return types.ParseInt(d, val, m)
	}
	return types.ParseInt(d, val, intModeDefault(reflect.TypeOf(d).Elem()))
}
// stringSetter assigns val verbatim when d is a *string.
func stringSetter(d interface{}, blank bool, val string, t tag) error {
	if blank {
		return errBlankUnsupported
	}
	if dsp, ok := d.(*string); ok {
		*dsp = val
		return nil
	}
	return errUnsupportedType
}
// kindSetters selects a setter by reflect.Kind for the built-in kinds gcfg
// supports directly.
var kindSetters = map[reflect.Kind]setter{
	reflect.String:  stringSetter,
	reflect.Bool:    boolSetter,
	reflect.Int:     intSetter,
	reflect.Int8:    intSetter,
	reflect.Int16:   intSetter,
	reflect.Int32:   intSetter,
	reflect.Int64:   intSetter,
	reflect.Uint:    intSetter,
	reflect.Uint8:   intSetter,
	reflect.Uint16:  intSetter,
	reflect.Uint32:  intSetter,
	reflect.Uint64:  intSetter,
	reflect.Uintptr: intSetter,
}

// typeSetters selects a setter by exact reflect.Type; consulted before
// kindSetters (see the setters slice ordering).
var typeSetters = map[reflect.Type]setter{
	reflect.TypeOf(big.Int{}): intSetter,
}
// typeSetter dispatches on the exact pointed-to type of d.
func typeSetter(d interface{}, blank bool, val string, tt tag) error {
	if s, ok := typeSetters[reflect.ValueOf(d).Type().Elem()]; ok {
		return s(d, blank, val, tt)
	}
	return errUnsupportedType
}

// kindSetter dispatches on the reflect.Kind of the pointed-to type of d.
func kindSetter(d interface{}, blank bool, val string, tt tag) error {
	if s, ok := kindSetters[reflect.ValueOf(d).Type().Elem().Kind()]; ok {
		return s(d, blank, val, tt)
	}
	return errUnsupportedType
}

// scanSetter is the last-resort setter: parse val with fmt's 'v' verb.
func scanSetter(d interface{}, blank bool, val string, tt tag) error {
	if blank {
		return errBlankUnsupported
	}
	return types.ScanFully(d, val, 'v')
}
// newValue allocates a fresh subsection value of vType for section sect.
// If the config struct has a "default-<sect>" field, its contents are copied
// into the new value via a gob encode/decode round trip (a deep copy), so
// new subsections start from those defaults rather than zero values.
func newValue(sect string, vCfg reflect.Value, vType reflect.Type) (reflect.Value, error) {
	pv := reflect.New(vType)
	dfltName := "default-" + sect
	dfltField, _ := fieldFold(vCfg, dfltName)
	var err error
	if dfltField.IsValid() {
		b := bytes.NewBuffer(nil)
		ge := gob.NewEncoder(b)
		err = ge.EncodeValue(dfltField)
		if err != nil {
			return pv, err
		}
		gd := gob.NewDecoder(bytes.NewReader(b.Bytes()))
		err = gd.DecodeValue(pv.Elem())
		if err != nil {
			return pv, err
		}
	}
	return pv, nil
}
// set assigns a single parsed config item into cfg: with name == "" it only
// ensures the section/subsection container exists; otherwise it sets variable
// name (blank meaning "no '=' present") to value. subsectPass selects whether
// map-typed (subsection) or struct-typed (plain) section fields are handled
// on this call; the other kind is silently skipped. Unknown sections or
// variables are reported to the collector as extraData warnings, while
// malformed cfg struct shapes are programmer errors and panic.
func set(c *warnings.Collector, cfg interface{}, sect, sub, name string,
	blank bool, value string, subsectPass bool) error {
	//
	vPCfg := reflect.ValueOf(cfg)
	if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
		panic(fmt.Errorf("config must be a pointer to a struct"))
	}
	vCfg := vPCfg.Elem()
	vSect, _ := fieldFold(vCfg, sect)
	if !vSect.IsValid() {
		err := extraData{section: sect}
		return c.Collect(err)
	}
	// A map field means the section carries subsections; only act when this
	// call's pass matches the field's kind.
	isSubsect := vSect.Kind() == reflect.Map
	if subsectPass != isSubsect {
		return nil
	}
	if isSubsect {
		vst := vSect.Type()
		if vst.Key().Kind() != reflect.String ||
			vst.Elem().Kind() != reflect.Ptr ||
			vst.Elem().Elem().Kind() != reflect.Struct {
			panic(fmt.Errorf("map field for section must have string keys and "+
				" pointer-to-struct values: section %q", sect))
		}
		if vSect.IsNil() {
			vSect.Set(reflect.MakeMap(vst))
		}
		k := reflect.ValueOf(sub)
		pv := vSect.MapIndex(k)
		if !pv.IsValid() {
			// First mention of this subsection: allocate it (seeded from
			// any "default-<sect>" field; see newValue).
			vType := vSect.Type().Elem().Elem()
			var err error
			pv, err = newValue(sect, vCfg, vType)
			if err != nil {
				return err
			}
			vSect.SetMapIndex(k, pv)
		}
		vSect = pv.Elem()
	} else if vSect.Kind() != reflect.Struct {
		panic(fmt.Errorf("field for section must be a map or a struct: "+
			"section %q", sect))
	} else if sub != "" {
		err := extraData{section: sect, subsection: &sub}
		return c.Collect(err)
	}
	// Empty name is a special value, meaning that only the
	// section/subsection object is to be created, with no values set.
	if name == "" {
		return nil
	}
	vVar, t := fieldFold(vSect, name)
	if !vVar.IsValid() {
		var err error
		if isSubsect {
			err = extraData{section: sect, subsection: &sub, variable: &name}
		} else {
			err = extraData{section: sect, variable: &name}
		}
		return c.Collect(err)
	}
	// vVal is either single-valued var, or newly allocated value within multi-valued var
	var vVal reflect.Value
	// multi-value if unnamed slice type
	isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice ||
		vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice
	if isMulti && vVar.Kind() == reflect.Ptr {
		if vVar.IsNil() {
			vVar.Set(reflect.New(vVar.Type().Elem()))
		}
		vVar = vVar.Elem()
	}
	// A blank assignment to a multi-valued var clears the slice.
	if isMulti && blank {
		vVar.Set(reflect.Zero(vVar.Type()))
		return nil
	}
	if isMulti {
		vVal = reflect.New(vVar.Type().Elem()).Elem()
	} else {
		vVal = vVar
	}
	isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
	isNew := isDeref && vVal.IsNil()
	// vAddr is address of value to set (dereferenced & allocated as needed)
	var vAddr reflect.Value
	switch {
	case isNew:
		vAddr = reflect.New(vVal.Type().Elem())
	case isDeref && !isNew:
		vAddr = vVal
	default:
		vAddr = vVal.Addr()
	}
	vAddrI := vAddr.Interface()
	// Try each setter in order until one handles the destination type.
	err, ok := error(nil), false
	for _, s := range setters {
		err = s(vAddrI, blank, value, t)
		if err == nil {
			ok = true
			break
		}
		if err != errUnsupportedType {
			return err
		}
	}
	if !ok {
		// in case all setters returned errUnsupportedType
		return err
	}
	if isNew { // set reference if it was dereferenced and newly allocated
		vVal.Set(vAddr)
	}
	if isMulti { // append if multi-valued
		vVar.Set(reflect.Append(vVar, vVal))
	}
	return nil
}

View file

@ -1,435 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TODO(gri) consider making this a separate package outside the go directory.
package token
import (
"fmt"
"sort"
"sync"
)
// -----------------------------------------------------------------------------
// Positions

// Position describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
//
type Position struct {
	Filename string // filename, if any
	Offset   int    // offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count)
}

// IsValid returns true if the position is valid.
func (pos *Position) IsValid() bool { return pos.Line > 0 }

// String returns a string in one of several forms:
//
//	file:line:column    valid position with file name
//	line:column         valid position without file name
//	file                invalid position with file name
//	-                   invalid position without file name
//
func (pos Position) String() string {
	if !pos.IsValid() {
		if pos.Filename == "" {
			return "-"
		}
		return pos.Filename
	}
	loc := fmt.Sprintf("%d:%d", pos.Line, pos.Column)
	if pos.Filename == "" {
		return loc
	}
	return pos.Filename + ":" + loc
}
// Pos is a compact encoding of a source position within a file set.
// It can be converted into a Position for a more convenient, but much
// larger, representation.
//
// The Pos value for a given file is a number in the range [base, base+size],
// where base and size are specified when adding the file to the file set via
// AddFile.
//
// To create the Pos value for a specific source offset, first add
// the respective file to the current file set (via FileSet.AddFile)
// and then call File.Pos(offset) for that file. Given a Pos value p
// for a specific file set fset, the corresponding Position value is
// obtained by calling fset.Position(p).
//
// Pos values can be compared directly with the usual comparison operators:
// If two Pos values p and q are in the same file, comparing p and q is
// equivalent to comparing the respective source file offsets. If p and q
// are in different files, p < q is true if the file implied by p was added
// to the respective file set before the file implied by q.
//
type Pos int

// The zero value for Pos is NoPos; there is no file and line information
// associated with it, and NoPos.IsValid() is false. NoPos is always
// smaller than any other Pos value. The corresponding Position value
// for NoPos is the zero value for Position.
//
const NoPos Pos = 0

// IsValid returns true if the position is valid.
func (p Pos) IsValid() bool {
	return p != NoPos
}
// -----------------------------------------------------------------------------
// File

// A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table.
//
type File struct {
	set  *FileSet
	name string // file name as provided to AddFile
	base int    // Pos value range for this file is [base...base+size]
	size int    // file size as provided to AddFile

	// lines and infos are protected by set.mutex
	lines []int      // offsets of the first character of each line
	infos []lineInfo // alternative position info from //line-style directives
}

// Name returns the file name of file f as registered with AddFile.
func (f *File) Name() string {
	return f.name
}

// Base returns the base offset of file f as registered with AddFile.
func (f *File) Base() int {
	return f.base
}

// Size returns the size of file f as registered with AddFile.
func (f *File) Size() int {
	return f.size
}

// LineCount returns the number of lines in file f.
func (f *File) LineCount() int {
	// lines is shared state; read it under the set's lock.
	f.set.mutex.RLock()
	n := len(f.lines)
	f.set.mutex.RUnlock()
	return n
}
// AddLine adds the line offset for a new line.
// The line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise the line offset is ignored.
//
func (f *File) AddLine(offset int) {
	f.set.mutex.Lock()
	defer f.set.mutex.Unlock()
	n := len(f.lines)
	if (n == 0 || f.lines[n-1] < offset) && offset < f.size {
		f.lines = append(f.lines, offset)
	}
}
// SetLines sets the line offsets for a file and returns true if successful.
// The line offsets are the offsets of the first character of each line;
// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
// An empty file has an empty line offset table.
// Each line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise SetLines fails and returns
// false.
//
func (f *File) SetLines(lines []int) bool {
	// verify validity of lines table: within the file, strictly increasing
	for i, offset := range lines {
		if offset >= f.size {
			return false
		}
		if i > 0 && offset <= lines[i-1] {
			return false
		}
	}

	// set lines table
	f.set.mutex.Lock()
	f.lines = lines
	f.set.mutex.Unlock()
	return true
}
// SetLinesForContent sets the line offsets for the given file content.
func (f *File) SetLinesForContent(content []byte) {
	var lines []int
	// line holds the offset of the start of the current line, or -1 once
	// that start has been recorded; a '\n' begins the next line.
	line := 0
	for offset, b := range content {
		if line >= 0 {
			lines = append(lines, line)
		}
		line = -1
		if b == '\n' {
			line = offset + 1
		}
	}

	// set lines table
	f.set.mutex.Lock()
	f.lines = lines
	f.set.mutex.Unlock()
}
// A lineInfo object describes alternative file and line number
// information (such as provided via a //line comment in a .go
// file) for a given file offset.
type lineInfo struct {
	// fields are exported to make them accessible to gob
	Offset   int    // file offset at which the alternative info takes effect
	Filename string // alternative file name
	Line     int    // alternative line number
}

// AddLineInfo adds alternative file and line number information for
// a given file offset. The offset must be larger than the offset for
// the previously added alternative line info and smaller than the
// file size; otherwise the information is ignored.
//
// AddLineInfo is typically used to register alternative position
// information for //line filename:line comments in source files.
//
func (f *File) AddLineInfo(offset int, filename string, line int) {
	f.set.mutex.Lock()
	// Same monotonicity rule as AddLine: only strictly increasing,
	// in-bounds offsets are recorded.
	if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
		f.infos = append(f.infos, lineInfo{offset, filename, line})
	}
	f.set.mutex.Unlock()
}
// Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p.
//
func (f *File) Pos(offset int) Pos {
	if offset <= f.size {
		return Pos(f.base + offset)
	}
	panic("illegal file offset")
}

// Offset returns the offset for the given file position p;
// p must be a valid Pos value in that file.
// f.Offset(f.Pos(offset)) == offset.
//
func (f *File) Offset(p Pos) int {
	q := int(p)
	if q >= f.base && q <= f.base+f.size {
		return q - f.base
	}
	panic("illegal Pos value")
}
// Line returns the line number for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Line(p Pos) int {
	// TODO(gri) this can be implemented much more efficiently
	return f.Position(p).Line
}

// searchLineInfos returns the index of the last lineInfo whose Offset is
// <= x, or -1 if there is none.
func searchLineInfos(a []lineInfo, x int) int {
	i := sort.Search(len(a), func(i int) bool { return a[i].Offset > x })
	return i - 1
}
// info returns the file name, line, and column number for a file offset.
func (f *File) info(offset int) (filename string, line, column int) {
	filename = f.name
	// Locate the line containing offset; the column is the distance from
	// the line start (both 1-based).
	if i := searchInts(f.lines, offset); i >= 0 {
		line, column = i+1, offset-f.lines[i]+1
	}
	if len(f.infos) > 0 {
		// almost no files have extra line infos
		if i := searchLineInfos(f.infos, offset); i >= 0 {
			// Apply the alternative filename and shift the line number by
			// the difference between the declared and actual line of the
			// directive.
			alt := &f.infos[i]
			filename = alt.Filename
			if i := searchInts(f.lines, alt.Offset); i >= 0 {
				line += alt.Line - i - 1
			}
		}
	}
	return
}

// position converts p (assumed to lie within f) into a full Position.
func (f *File) position(p Pos) (pos Position) {
	offset := int(p) - f.base
	pos.Offset = offset
	pos.Filename, pos.Line, pos.Column = f.info(offset)
	return
}

// Position returns the Position value for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Position(p Pos) (pos Position) {
	if p != NoPos {
		if int(p) < f.base || int(p) > f.base+f.size {
			panic("illegal Pos value")
		}
		pos = f.position(p)
	}
	return
}
// -----------------------------------------------------------------------------
// FileSet

// A FileSet represents a set of source files.
// Methods of file sets are synchronized; multiple goroutines
// may invoke them concurrently.
//
type FileSet struct {
	mutex sync.RWMutex // protects the file set
	base  int          // base offset for the next file
	files []*File      // list of files in the order added to the set
	last  *File        // cache of last file looked up
}
// NewFileSet creates a new file set.
func NewFileSet() *FileSet {
	// base starts at 1 because 0 == NoPos.
	return &FileSet{base: 1}
}
// Base returns the minimum base offset that must be provided to
// AddFile when adding the next file.
//
func (s *FileSet) Base() int {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	return s.base
}
// AddFile adds a new file with a given filename, base offset, and file size
// to the file set s and returns the file. Multiple files may have the same
// name. The base offset must not be smaller than the FileSet's Base(), and
// size must not be negative.
//
// Adding the file will set the file set's Base() value to base + size + 1
// as the minimum base value for the next file. The following relationship
// exists between a Pos value p for a given file offset offs:
//
//	int(p) = base + offs
//
// with offs in the range [0, size] and thus p in the range [base, base+size].
// For convenience, File.Pos may be used to create file-specific position
// values from a file offset.
//
func (s *FileSet) AddFile(filename string, base, size int) *File {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if base < s.base || size < 0 {
		panic("illegal base or size")
	}
	// base >= s.base && size >= 0
	// Every file starts with line 1 at offset 0, hence []int{0}.
	f := &File{s, filename, base, size, []int{0}, nil}
	base += size + 1 // +1 because EOF also has a position
	if base < 0 {
		panic("token.Pos offset overflow (> 2G of source code in file set)")
	}
	// add the file to the file set
	s.base = base
	s.files = append(s.files, f)
	s.last = f
	return f
}
// Iterate calls f for the files in the file set in the order they were added
// until f returns false.
//
func (s *FileSet) Iterate(f func(*File) bool) {
	// Hold the lock only while fetching each file, not during the callback.
	for i := 0; ; i++ {
		var file *File
		s.mutex.RLock()
		if i < len(s.files) {
			file = s.files[i]
		}
		s.mutex.RUnlock()
		if file == nil || !f(file) {
			return
		}
	}
}

// searchFiles returns the index of the last file whose base is <= x,
// or -1 if there is none.
func searchFiles(a []*File, x int) int {
	i := sort.Search(len(a), func(i int) bool { return a[i].base > x })
	return i - 1
}
// file returns the file containing position p, or nil if p is out of range.
// A one-entry cache (s.last) is consulted before the binary search.
// Callers must hold at least a read lock on s.mutex.
func (s *FileSet) file(p Pos) *File {
	// common case: p is in last file
	if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
		return f
	}
	// p is not in last file - search all files
	if i := searchFiles(s.files, int(p)); i >= 0 {
		f := s.files[i]
		// f.base <= int(p) by definition of searchFiles
		if int(p) <= f.base+f.size {
			// NOTE: updating the cache here is a write under what may be
			// only a read lock; benign for correctness of the result.
			s.last = f
			return f
		}
	}
	return nil
}

// File returns the file that contains the position p.
// If no such file is found (for instance for p == NoPos),
// the result is nil.
//
func (s *FileSet) File(p Pos) (f *File) {
	if p != NoPos {
		s.mutex.RLock()
		f = s.file(p)
		s.mutex.RUnlock()
	}
	return
}
// Position converts a Pos in the fileset into a general Position.
// For p == NoPos the zero Position is returned.
func (s *FileSet) Position(p Pos) (pos Position) {
	if p == NoPos {
		return
	}
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	if f := s.file(p); f != nil {
		pos = f.position(p)
	}
	return
}
// -----------------------------------------------------------------------------
// Helper functions

// searchInts returns the index of the largest element of a that is <= x,
// or -1 if all elements are greater than x. It is a manually inlined
// version of:
//
//	return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
//
// kept hand-written because it measurably speeds up go/printer-style
// workloads; remove once compilers inline the closure form well enough.
func searchInts(a []int, x int) int {
	lo, hi := 0, len(a)
	for lo < hi {
		mid := lo + (hi-lo)/2 // midpoint without overflow
		// invariant: lo <= mid < hi
		if a[mid] <= x {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return lo - 1
}

View file

@ -1,56 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package token
// serializedFile is the exported, encoder-friendly mirror of File, used by
// FileSet.Read and FileSet.Write.
type serializedFile struct {
	// fields correspond 1:1 to fields with same (lower-case) name in File
	Name string
	Base int
	Size int
	Lines []int
	Infos []lineInfo
}
// serializedFileSet is the encoder-friendly mirror of FileSet.
type serializedFileSet struct {
	Base int
	Files []serializedFile
}
// Read calls decode to deserialize a file set into s; s must not be nil.
func (s *FileSet) Read(decode func(interface{}) error) error {
	var ss serializedFileSet
	if err := decode(&ss); err != nil {
		return err
	}
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.base = ss.Base
	// Rebuild the *File slice, re-linking each file to this set.
	files := make([]*File, len(ss.Files))
	for i := range ss.Files {
		sf := &ss.Files[i]
		files[i] = &File{s, sf.Name, sf.Base, sf.Size, sf.Lines, sf.Infos}
	}
	s.files = files
	// Drop the lookup cache; it pointed into the old file list.
	s.last = nil
	return nil
}
// Write calls encode to serialize the file set s.
func (s *FileSet) Write(encode func(interface{}) error) error {
	var ss serializedFileSet
	// Snapshot the set under the lock, then encode outside it.
	s.mutex.Lock()
	ss.Base = s.base
	ss.Files = make([]serializedFile, len(s.files))
	for i, f := range s.files {
		ss.Files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
	}
	s.mutex.Unlock()
	return encode(ss)
}

View file

@ -1,83 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package token defines constants representing the lexical tokens of the gcfg
// configuration syntax and basic operations on tokens (printing, predicates).
//
// Note that the API for the token package may change to accommodate new
// features or implementation changes in gcfg.
//
package token
import "strconv"
// Token is the set of lexical tokens of the gcfg configuration syntax.
type Token int
// The list of tokens.
const (
	// Special tokens
	ILLEGAL Token = iota
	EOF
	COMMENT
	// literal_beg/literal_end delimit the literal class tested by IsLiteral.
	literal_beg
	// Identifiers and basic type literals
	// (these tokens stand for classes of literals)
	IDENT // section-name, variable-name
	STRING // "subsection-name", variable value
	literal_end
	// operator_beg/operator_end delimit the operator class tested by IsOperator.
	operator_beg
	// Operators and delimiters
	ASSIGN // =
	LBRACK // [
	RBRACK // ]
	EOL // \n
	operator_end
)
// tokens maps each Token to its display string: the literal character
// sequence for operators/delimiters, the constant name for everything else.
// Entries left empty (the unexported markers) fall back to "token(N)".
var tokens = [...]string{
	ILLEGAL: "ILLEGAL",
	EOF: "EOF",
	COMMENT: "COMMENT",
	IDENT: "IDENT",
	STRING: "STRING",
	ASSIGN: "=",
	LBRACK: "[",
	RBRACK: "]",
	EOL: "\n",
}
// String returns the string corresponding to the token tok.
// For operators and delimiters, the string is the actual token character
// sequence (e.g., for the token ASSIGN, the string is "="). For all other
// tokens the string corresponds to the token constant name (e.g. for the
// token IDENT, the string is "IDENT").
//
func (tok Token) String() string {
	if 0 <= tok && tok < Token(len(tokens)) {
		if name := tokens[tok]; name != "" {
			return name
		}
	}
	// Out-of-range values and unnamed markers get a generic rendering.
	return "token(" + strconv.Itoa(int(tok)) + ")"
}
// Predicates
// IsLiteral returns true for tokens corresponding to identifiers
// and basic type literals; it returns false otherwise.
// It relies on the token's position between the literal_beg/literal_end markers.
//
func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
// It relies on the token's position between the operator_beg/operator_end markers.
//
func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }

View file

@ -1,23 +0,0 @@
package types
// BoolValues defines the name and value mappings for ParseBool.
// Keys here are lower-case; matching is case-insensitive because the
// package's boolParser is built with CaseMatch left false.
var BoolValues = map[string]interface{}{
	"true": true, "yes": true, "on": true, "1": true,
	"false": false, "no": false, "off": false, "0": false,
}
// boolParser is the shared EnumParser backing ParseBool, initialized once
// at package load with the BoolValues mappings.
var boolParser = func() *EnumParser {
	ep := &EnumParser{}
	ep.AddVals(BoolValues)
	return ep
}()
// ParseBool parses bool values according to the definitions in BoolValues.
// Parsing is case-insensitive.
func ParseBool(s string) (bool, error) {
	val, err := boolParser.Parse(s)
	if err != nil {
		return false, err
	}
	// The parser only ever stores bool values, so this assertion holds.
	return val.(bool), nil
}

View file

@ -1,4 +0,0 @@
// Package types defines helpers for type conversions.
//
// The API for this package is not finalized yet.
package types

View file

@ -1,44 +0,0 @@
package types
import (
"fmt"
"reflect"
"strings"
)
// EnumParser parses "enum" values; i.e. a predefined set of strings to
// predefined values.
type EnumParser struct {
	Type string // type name; if not set, use type of first value added
	CaseMatch bool // if true, matching of strings is case-sensitive
	// PrefixMatch bool
	// vals maps (possibly lower-cased) names to their values; lazily
	// created by AddVals.
	vals map[string]interface{}
}
// AddVals adds strings and values to an EnumParser. Unless CaseMatch is
// set, keys are stored lower-cased. If Type is unset it is taken from the
// first value encountered (map iteration order, hence arbitrary).
func (ep *EnumParser) AddVals(vals map[string]interface{}) {
	if ep.vals == nil {
		ep.vals = make(map[string]interface{})
	}
	for name, v := range vals {
		if ep.Type == "" {
			ep.Type = reflect.TypeOf(v).Name()
		}
		key := name
		if !ep.CaseMatch {
			key = strings.ToLower(key)
		}
		ep.vals[key] = v
	}
}
// Parse parses the string and returns the mapped value, or an error if s
// matches none of the registered names. Matching is case-insensitive
// unless CaseMatch is set.
func (ep EnumParser) Parse(s string) (interface{}, error) {
	if !ep.CaseMatch {
		s = strings.ToLower(s)
	}
	v, ok := ep.vals[s]
	if !ok {
		// Return nil, not false: this is a generic parser and the
		// value is meaningless on error (previously it leaked a bool).
		return nil, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
	}
	return v, nil
}

86
vendor/gopkg.in/gcfg.v1/types/int.go generated vendored
View file

@ -1,86 +0,0 @@
package types
import (
"fmt"
"strings"
)
// An IntMode is a mode for parsing integer values, representing a set of
// accepted bases.
type IntMode uint8
// IntMode values for ParseInt; can be combined using binary or.
const (
	// Dec, Hex and Oct are single-bit flags so combinations like
	// Dec+Hex form a set of permitted bases.
	Dec IntMode = 1 << iota
	Hex
	Oct
)
// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
func (m IntMode) String() string {
	var parts []string
	// Probe each flag in declaration order so output is deterministic.
	for _, f := range []struct {
		bit IntMode
		name string
	}{{Dec, "Dec"}, {Hex, "Hex"}, {Oct, "Oct"}} {
		if m&f.bit != 0 {
			parts = append(parts, f.name)
		}
	}
	return "IntMode(" + strings.Join(parts, "|") + ")"
}
// errIntAmbig is returned by ParseInt when the permitted bases cannot be
// distinguished without an explicit '0' prefix.
var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")

// prefix0 reports whether val carries an octal-style "0" prefix,
// optionally preceded by a minus sign.
func prefix0(val string) bool {
	return strings.HasPrefix(strings.TrimPrefix(val, "-"), "0")
}
// prefix0x reports whether val carries a hexadecimal "0x" prefix,
// optionally preceded by a minus sign.
func prefix0x(val string) bool {
	return strings.HasPrefix(strings.TrimPrefix(val, "-"), "0x")
}
// ParseInt parses val using mode into intptr, which must be a pointer to an
// integer kind type. Non-decimal value require prefix `0` or `0x` in the cases
// when mode permits ambiguity of base; otherwise the prefix can be omitted.
func ParseInt(intptr interface{}, val string, mode IntMode) error {
	val = strings.TrimSpace(val)
	verb := byte(0)
	// Choose the fmt verb handed to ScanFully: 'd' forces decimal, 'x'
	// hex, 'o' octal, while 'v' lets Sscanf infer the base from a
	// 0/0x prefix.
	switch mode {
	case Dec:
		verb = 'd'
	case Dec + Hex:
		// Hex only with an explicit 0x prefix; otherwise decimal.
		if prefix0x(val) {
			verb = 'v'
		} else {
			verb = 'd'
		}
	case Dec + Oct:
		// A bare leading 0 (not 0x) selects octal via base inference.
		if prefix0(val) && !prefix0x(val) {
			verb = 'v'
		} else {
			verb = 'd'
		}
	case Dec + Hex + Oct:
		// All bases allowed: defer entirely to prefix inference.
		verb = 'v'
	case Hex:
		if prefix0x(val) {
			verb = 'v'
		} else {
			verb = 'x'
		}
	case Oct:
		verb = 'o'
	case Hex + Oct:
		// Without a '0' prefix hex and octal are indistinguishable.
		if prefix0(val) {
			verb = 'v'
		} else {
			return errIntAmbig
		}
	}
	// verb stays 0 only for mode combinations not covered above.
	if verb == 0 {
		panic("unsupported mode")
	}
	return ScanFully(intptr, val, verb)
}

View file

@ -1,23 +0,0 @@
package types
import (
"fmt"
"io"
"reflect"
)
// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
// It fails both when the value cannot be scanned and when characters
// remain after the scanned value.
func ScanFully(ptr interface{}, val string, verb byte) error {
	t := reflect.ValueOf(ptr).Elem().Type()
	// Scan one extra %s token: if it matches, val had trailing garbage.
	var extra []byte
	n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &extra)
	if n < 1 || n == 1 && err != io.EOF {
		return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
	}
	if n > 1 {
		return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(extra))
	}
	// n == 1 && err == io.EOF: exactly one value, nothing left over.
	return nil
}

View file

@ -0,0 +1,172 @@
package pool
import (
"sync"
"testing"
"time"
. "gopkg.in/go-playground/assert.v1"
)
// NOTES:
// - Run "go test" to run tests
// - Run "gocov test | gocov report" to report on test converage by file
// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called
//
// or
//
// -- may be a good idea to change to output path to somewherelike /tmp
// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
//
// TestLimitedBatch queues 4 one-second work units on a fresh 4-worker pool
// and checks that exactly 4 results come back from the batch.
func TestLimitedBatch(t *testing.T) {
	newFunc := func(i int) func(WorkUnit) (interface{}, error) {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(time.Second * 1)
			return i, nil
		}
	}
	pool := NewLimited(4)
	defer pool.Close()
	batch := pool.Batch()
	for i := 0; i < 4; i++ {
		batch.Queue(newFunc(i))
	}
	// QueueComplete must be called so the Results channel terminates.
	batch.QueueComplete()
	var count int
	for range batch.Results() {
		count++
	}
	Equal(t, count, 4)
}
// TestLimitedBatchGlobalPool is the same check against the shared
// limitedGpool created in TestMain, exercising a long-running pool.
func TestLimitedBatchGlobalPool(t *testing.T) {
	newFunc := func(i int) func(WorkUnit) (interface{}, error) {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(time.Second * 1)
			return i, nil
		}
	}
	batch := limitedGpool.Batch()
	for i := 0; i < 4; i++ {
		batch.Queue(newFunc(i))
	}
	batch.QueueComplete()
	var count int
	for range batch.Results() {
		count++
	}
	Equal(t, count, 4)
}
// TestLimitedBatchCancelItemsThrownAway cancels immediately while 40 items
// are being queued concurrently; since cancellation discards pending work,
// fewer than 40 results must arrive. Timing-sensitive by design.
func TestLimitedBatchCancelItemsThrownAway(t *testing.T) {
	newFunc := func(i int) func(WorkUnit) (interface{}, error) {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(time.Second * 1)
			return i, nil
		}
	}
	pool := NewLimited(4)
	defer pool.Close()
	batch := pool.Batch()
	go func() {
		for i := 0; i < 40; i++ {
			batch.Queue(newFunc(i))
		}
	}()
	// Cancel before (most of) the queueing goroutine has run.
	batch.Cancel()
	var count int
	for range batch.Results() {
		count++
	}
	NotEqual(t, count, 40)
}
// TestLimitedBatchCancelItemsCancelledAfterward queues 40 items, waits 2s,
// then cancels; all 40 results are still expected because they were queued
// before the cancel. NOTE(review): assumes queueing completes within 2s —
// timing-sensitive.
func TestLimitedBatchCancelItemsCancelledAfterward(t *testing.T) {
	newFunc := func(i int) func(WorkUnit) (interface{}, error) {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(time.Second * 1)
			return i, nil
		}
	}
	pool := NewLimited(4)
	defer pool.Close()
	batch := pool.Batch()
	go func() {
		for i := 0; i < 40; i++ {
			batch.Queue(newFunc(i))
		}
	}()
	time.Sleep(time.Second * 2)
	batch.Cancel()
	var count int
	for range batch.Results() {
		count++
	}
	Equal(t, count, 40)
}
// TestLimitedBatchWaitAll checks that WaitAll blocks until all 10 queued
// units have run (counted under a mutex inside the work funcs).
func TestLimitedBatchWaitAll(t *testing.T) {
	var count int
	var m sync.Mutex
	newFunc := func(i int) func(WorkUnit) (interface{}, error) {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(time.Second * 1)
			m.Lock()
			count++
			m.Unlock()
			return i, nil
		}
	}
	pool := NewLimited(4)
	defer pool.Close()
	batch := pool.Batch()
	go func() {
		for i := 0; i < 10; i++ {
			batch.Queue(newFunc(i))
		}
		batch.QueueComplete()
	}()
	batch.WaitAll()
	Equal(t, count, 10)
}

View file

@ -0,0 +1,172 @@
package pool
import (
"sync"
"testing"
"time"
. "gopkg.in/go-playground/assert.v1"
)
// NOTES:
// - Run "go test" to run tests
// - Run "gocov test | gocov report" to report on test converage by file
// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called
//
// or
//
// -- may be a good idea to change to output path to somewherelike /tmp
// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
//
// TestUnlimitedBatch queues 4 one-second work units on a fresh unlimited
// pool and checks that exactly 4 results come back from the batch.
func TestUnlimitedBatch(t *testing.T) {
	newFunc := func(i int) func(WorkUnit) (interface{}, error) {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(time.Second * 1)
			return i, nil
		}
	}
	pool := New()
	defer pool.Close()
	batch := pool.Batch()
	for i := 0; i < 4; i++ {
		batch.Queue(newFunc(i))
	}
	// QueueComplete must be called so the Results channel terminates.
	batch.QueueComplete()
	var count int
	for range batch.Results() {
		count++
	}
	Equal(t, count, 4)
}
// TestUnlimitedBatchGlobalPool is the same check against the shared
// unlimitedGpool created in TestMain.
func TestUnlimitedBatchGlobalPool(t *testing.T) {
	newFunc := func(i int) func(WorkUnit) (interface{}, error) {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(time.Second * 1)
			return i, nil
		}
	}
	batch := unlimitedGpool.Batch()
	for i := 0; i < 4; i++ {
		batch.Queue(newFunc(i))
	}
	batch.QueueComplete()
	var count int
	for range batch.Results() {
		count++
	}
	Equal(t, count, 4)
}
// TestUnlimitedBatchCancelItemsThrownAway cancels immediately while 40
// items are queued concurrently; fewer than 40 results must arrive.
// Timing-sensitive by design.
func TestUnlimitedBatchCancelItemsThrownAway(t *testing.T) {
	newFunc := func(i int) func(WorkUnit) (interface{}, error) {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(time.Second * 1)
			return i, nil
		}
	}
	pool := New()
	defer pool.Close()
	batch := pool.Batch()
	go func() {
		for i := 0; i < 40; i++ {
			batch.Queue(newFunc(i))
		}
	}()
	// Cancel before (most of) the queueing goroutine has run.
	batch.Cancel()
	var count int
	for range batch.Results() {
		count++
	}
	NotEqual(t, count, 40)
}
// TestUnlimitedBatchCancelItemsCancelledAfterward queues 40 items, waits
// 2s, then cancels; all 40 results are still expected. NOTE(review):
// assumes queueing completes within 2s — timing-sensitive.
func TestUnlimitedBatchCancelItemsCancelledAfterward(t *testing.T) {
	newFunc := func(i int) func(WorkUnit) (interface{}, error) {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(time.Second * 1)
			return i, nil
		}
	}
	pool := New()
	defer pool.Close()
	batch := pool.Batch()
	go func() {
		for i := 0; i < 40; i++ {
			batch.Queue(newFunc(i))
		}
	}()
	time.Sleep(time.Second * 2)
	batch.Cancel()
	var count int
	for range batch.Results() {
		count++
	}
	Equal(t, count, 40)
}
// TestUnlimitedBatchWaitAll checks that WaitAll blocks until all 10 queued
// units have run (counted under a mutex inside the work funcs).
func TestUnlimitedBatchWaitAll(t *testing.T) {
	var count int
	var m sync.Mutex
	newFunc := func(i int) func(WorkUnit) (interface{}, error) {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(time.Second * 1)
			m.Lock()
			count++
			m.Unlock()
			return i, nil
		}
	}
	pool := New()
	defer pool.Close()
	batch := pool.Batch()
	go func() {
		for i := 0; i < 10; i++ {
			batch.Queue(newFunc(i))
		}
		batch.QueueComplete()
	}()
	batch.WaitAll()
	Equal(t, count, 10)
}

View file

@ -0,0 +1,66 @@
package main
import (
"fmt"
"time"
"gopkg.in/go-playground/pool.v3"
)
// gpool is a long-running shared pool limited to 5 concurrent workers.
var gpool = pool.NewLimited(5)
// main queues 10 simulated email sends as a batch on the global pool and
// consumes the results.
func main() {
	// OK so maybe you want a long running pool to maximize throughput
	// yet limit the # of workers eg. email provider may limit the # of
	// concurrent connection you can have so spin up a pool with the #
	// of workers being that limit and then can batch
	// (or send per unit if desired) then can maximize email sending throughput
	// without breaking your providers limits.
	batch := gpool.Batch()
	// for max speed Queue in another goroutine
	// but it is not required, just can't start reading results
	// until all items are Queued.
	go func() {
		for i := 0; i < 10; i++ {
			batch.Queue(sendEmail("email content"))
		}
		// DO NOT FORGET THIS OR GOROUTINES WILL DEADLOCK
		// if calling Cancel() it calls QueueComplete() internally
		batch.QueueComplete()
	}()
	for email := range batch.Results() {
		if err := email.Error(); err != nil {
			// handle error
			// maybe call batch.Cancel()
			// Value() is nil when Error() is non-nil, so skip the unit
			// to avoid a nil type-assertion panic below.
			continue
		}
		// use return value
		fmt.Println(email.Value().(bool))
	}
}
// sendEmail returns a WorkFunc that simulates sending an email and
// reports success unless the unit was cancelled.
func sendEmail(email string) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		// simulate waiting for something, like TCP connection to be established
		// or connection from pool grabbed
		time.Sleep(time.Second * 1)
		if wu.IsCancelled() {
			// return values not used
			return nil, nil
		}
		// ready for processing...
		return true, nil // everything ok, send nil, error if not
	}
}

View file

@ -0,0 +1,59 @@
package main
import (
"fmt"
"time"
"gopkg.in/go-playground/pool.v3"
)
// main creates a short-lived 10-worker pool, queues 10 simulated email
// sends as a batch, and consumes the results.
func main() {
	p := pool.NewLimited(10)
	defer p.Close()
	batch := p.Batch()
	// for max speed Queue in another goroutine
	// but it is not required, just can't start reading results
	// until all items are Queued.
	go func() {
		for i := 0; i < 10; i++ {
			batch.Queue(sendEmail("email content"))
		}
		// DO NOT FORGET THIS OR GOROUTINES WILL DEADLOCK
		// if calling Cancel() it calls QueueComplete() internally
		batch.QueueComplete()
	}()
	for email := range batch.Results() {
		if err := email.Error(); err != nil {
			// handle error
			// maybe call batch.Cancel()
			// Value() is nil when Error() is non-nil, so skip the unit
			// to avoid a nil type-assertion panic below.
			continue
		}
		// use return value
		fmt.Println(email.Value().(bool))
	}
}
// sendEmail returns a WorkFunc that simulates sending an email and
// reports success unless the unit was cancelled.
func sendEmail(email string) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		// simulate waiting for something, like TCP connection to be established
		// or connection from pool grabbed
		time.Sleep(time.Second * 1)
		if wu.IsCancelled() {
			// return values not used
			return nil, nil
		}
		// ready for processing...
		return true, nil // everything ok, send nil, error if not
	}
}

View file

@ -0,0 +1,73 @@
package main
import (
"fmt"
"time"
"gopkg.in/go-playground/pool.v3"
)
// main queues two independent lookups on a 10-worker pool and waits on
// each work unit individually.
func main() {
	p := pool.NewLimited(10)
	defer p.Close()
	user := p.Queue(getUser(13))
	other := p.Queue(getOtherInfo(13))
	user.Wait()
	if err := user.Error(); err != nil {
		// handle error
		// Value() is nil on error; bail out before the type assertion
		// below panics.
		return
	}
	// do stuff with user
	username := user.Value().(string)
	fmt.Println(username)
	other.Wait()
	if err := other.Error(); err != nil {
		// handle error
		// Same as above: don't touch Value() after an error.
		return
	}
	// do stuff with other
	otherInfo := other.Value().(string)
	fmt.Println(otherInfo)
}
// getUser returns a WorkFunc that simulates fetching a user name.
func getUser(id int) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		// simulate waiting for something, like TCP connection to be established
		// or connection from pool grabbed
		time.Sleep(time.Second * 1)
		if wu.IsCancelled() {
			// return values not used
			return nil, nil
		}
		// ready for processing...
		return "Joeybloggs", nil
	}
}
// getOtherInfo returns a WorkFunc that simulates fetching auxiliary data.
func getOtherInfo(id int) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		// simulate waiting for something, like TCP connection to be established
		// or connection from pool grabbed
		time.Sleep(time.Second * 1)
		if wu.IsCancelled() {
			// return values not used
			return nil, nil
		}
		// ready for processing...
		return "Other Info", nil
	}
}

View file

@ -0,0 +1,66 @@
package main
import (
"fmt"
"time"
"gopkg.in/go-playground/pool.v3"
)
// gpool is a long-running shared pool with no worker limit.
var gpool = pool.New()
// main queues 10 simulated email sends as a batch on the global pool and
// consumes the results.
func main() {
	// OK so maybe you want a long running pool to maximize throughput
	// yet limit the # of workers eg. email provider may limit the # of
	// concurrent connection you can have so spin up a pool with the #
	// of workers being that limit and then can batch
	// (or send per unit if desired) then can maximize email sending throughput
	// without breaking your providers limits.
	batch := gpool.Batch()
	// for max speed Queue in another goroutine
	// but it is not required, just can't start reading results
	// until all items are Queued.
	go func() {
		for i := 0; i < 10; i++ {
			batch.Queue(sendEmail("email content"))
		}
		// DO NOT FORGET THIS OR GOROUTINES WILL DEADLOCK
		// if calling Cancel() it calls QueueComplete() internally
		batch.QueueComplete()
	}()
	for email := range batch.Results() {
		if err := email.Error(); err != nil {
			// handle error
			// maybe call batch.Cancel()
			// Value() is nil when Error() is non-nil, so skip the unit
			// to avoid a nil type-assertion panic below.
			continue
		}
		// use return value
		fmt.Println(email.Value().(bool))
	}
}
// sendEmail returns a WorkFunc that simulates sending an email and
// reports success unless the unit was cancelled.
func sendEmail(email string) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		// simulate waiting for something, like TCP connection to be established
		// or connection from pool grabbed
		time.Sleep(time.Second * 1)
		if wu.IsCancelled() {
			// return values not used
			return nil, nil
		}
		// ready for processing...
		return true, nil // everything ok, send nil, error if not
	}
}

View file

@ -0,0 +1,59 @@
package main
import (
"fmt"
"time"
"gopkg.in/go-playground/pool.v3"
)
// main creates a short-lived unlimited pool, queues 10 simulated email
// sends as a batch, and consumes the results.
func main() {
	p := pool.New()
	defer p.Close()
	batch := p.Batch()
	// for max speed Queue in another goroutine
	// but it is not required, just can't start reading results
	// until all items are Queued.
	go func() {
		for i := 0; i < 10; i++ {
			batch.Queue(sendEmail("email content"))
		}
		// DO NOT FORGET THIS OR GOROUTINES WILL DEADLOCK
		// if calling Cancel() it calls QueueComplete() internally
		batch.QueueComplete()
	}()
	for email := range batch.Results() {
		if err := email.Error(); err != nil {
			// handle error
			// maybe call batch.Cancel()
			// Value() is nil when Error() is non-nil, so skip the unit
			// to avoid a nil type-assertion panic below.
			continue
		}
		// use return value
		fmt.Println(email.Value().(bool))
	}
}
// sendEmail returns a WorkFunc that simulates sending an email and
// reports success unless the unit was cancelled.
func sendEmail(email string) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		// simulate waiting for something, like TCP connection to be established
		// or connection from pool grabbed
		time.Sleep(time.Second * 1)
		if wu.IsCancelled() {
			// return values not used
			return nil, nil
		}
		// ready for processing...
		return true, nil // everything ok, send nil, error if not
	}
}

View file

@ -0,0 +1,73 @@
package main
import (
"fmt"
"time"
"gopkg.in/go-playground/pool.v3"
)
// main queues two independent lookups on an unlimited pool and waits on
// each work unit individually.
func main() {
	p := pool.New()
	defer p.Close()
	user := p.Queue(getUser(13))
	other := p.Queue(getOtherInfo(13))
	user.Wait()
	if err := user.Error(); err != nil {
		// handle error
		// Value() is nil on error; bail out before the type assertion
		// below panics.
		return
	}
	// do stuff with user
	username := user.Value().(string)
	fmt.Println(username)
	other.Wait()
	if err := other.Error(); err != nil {
		// handle error
		// Same as above: don't touch Value() after an error.
		return
	}
	// do stuff with other
	otherInfo := other.Value().(string)
	fmt.Println(otherInfo)
}
// getUser returns a WorkFunc that simulates fetching a user name.
func getUser(id int) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		// simulate waiting for something, like TCP connection to be established
		// or connection from pool grabbed
		time.Sleep(time.Second * 1)
		if wu.IsCancelled() {
			// return values not used
			return nil, nil
		}
		// ready for processing...
		return "Joeybloggs", nil
	}
}
// getOtherInfo returns a WorkFunc that simulates fetching auxiliary data.
func getOtherInfo(id int) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		// simulate waiting for something, like TCP connection to be established
		// or connection from pool grabbed
		time.Sleep(time.Second * 1)
		if wu.IsCancelled() {
			// return values not used
			return nil, nil
		}
		// ready for processing...
		return "Other Info", nil
	}
}

View file

@ -0,0 +1,185 @@
package pool
import (
"testing"
"time"
)
// BenchmarkLimitedSmallRun runs 10 fixed-cost work units on a 10-worker
// limited pool and sums the results.
// NOTE(review): none of these benchmarks loop over b.N — each performs a
// fixed amount of work regardless of the benchmark iteration count, so
// reported ns/op is not meaningful in the usual sense; confirm intended.
func BenchmarkLimitedSmallRun(b *testing.B) {
	res := make([]WorkUnit, 10)
	b.ReportAllocs()
	pool := NewLimited(10)
	defer pool.Close()
	fn := func(wu WorkUnit) (interface{}, error) {
		time.Sleep(time.Millisecond * 500)
		if wu.IsCancelled() {
			return nil, nil
		}
		time.Sleep(time.Millisecond * 500)
		return 1, nil
	}
	for i := 0; i < 10; i++ {
		res[i] = pool.Queue(fn)
	}
	var count int
	for _, cw := range res {
		cw.Wait()
		if cw.Error() == nil {
			count += cw.Value().(int)
		}
	}
	if count != 10 {
		b.Fatal("Count Incorrect")
	}
}
// BenchmarkLimitedSmallCancel queues 20 units on a 4-worker pool,
// cancelling the pool partway through queueing, then waits on whatever
// was accepted.
func BenchmarkLimitedSmallCancel(b *testing.B) {
	res := make([]WorkUnit, 0, 20)
	b.ReportAllocs()
	pool := NewLimited(4)
	defer pool.Close()
	newFunc := func(i int) WorkFunc {
		return func(wu WorkUnit) (interface{}, error) {
			time.Sleep(time.Millisecond * 500)
			if wu.IsCancelled() {
				return nil, nil
			}
			time.Sleep(time.Millisecond * 500)
			return i, nil
		}
	}
	for i := 0; i < 20; i++ {
		// Cancel mid-queue to measure cancellation behaviour.
		if i == 6 {
			pool.Cancel()
		}
		res = append(res, pool.Queue(newFunc(i)))
	}
	for _, wrk := range res {
		if wrk == nil {
			continue
		}
		wrk.Wait()
	}
}
// BenchmarkLimitedLargeCancel is the same cancellation scenario scaled to
// 1000 queued units.
func BenchmarkLimitedLargeCancel(b *testing.B) {
	res := make([]WorkUnit, 0, 1000)
	b.ReportAllocs()
	pool := NewLimited(4)
	defer pool.Close()
	newFunc := func(i int) WorkFunc {
		return func(wu WorkUnit) (interface{}, error) {
			time.Sleep(time.Millisecond * 500)
			if wu.IsCancelled() {
				return nil, nil
			}
			time.Sleep(time.Millisecond * 500)
			return i, nil
		}
	}
	for i := 0; i < 1000; i++ {
		if i == 6 {
			pool.Cancel()
		}
		res = append(res, pool.Queue(newFunc(i)))
	}
	for _, wrk := range res {
		if wrk == nil {
			continue
		}
		wrk.Wait()
	}
}
// BenchmarkLimitedOverconsumeLargeRun queues 100 units onto only 25
// workers so the queue is oversubscribed, then verifies all complete.
func BenchmarkLimitedOverconsumeLargeRun(b *testing.B) {
	res := make([]WorkUnit, 100)
	b.ReportAllocs()
	pool := NewLimited(25)
	defer pool.Close()
	newFunc := func(i int) WorkFunc {
		return func(wu WorkUnit) (interface{}, error) {
			time.Sleep(time.Millisecond * 500)
			if wu.IsCancelled() {
				return nil, nil
			}
			time.Sleep(time.Millisecond * 500)
			return 1, nil
		}
	}
	for i := 0; i < 100; i++ {
		res[i] = pool.Queue(newFunc(i))
	}
	var count int
	for _, cw := range res {
		cw.Wait()
		count += cw.Value().(int)
	}
	if count != 100 {
		b.Fatalf("Count Incorrect, Expected '100' Got '%d'", count)
	}
}
// BenchmarkLimitedBatchSmallRun measures the batch API: 10 units queued,
// results drained from the batch's Results channel.
func BenchmarkLimitedBatchSmallRun(b *testing.B) {
	fn := func(wu WorkUnit) (interface{}, error) {
		time.Sleep(time.Millisecond * 500)
		if wu.IsCancelled() {
			return nil, nil
		}
		time.Sleep(time.Millisecond * 500)
		return 1, nil
	}
	pool := NewLimited(10)
	defer pool.Close()
	batch := pool.Batch()
	for i := 0; i < 10; i++ {
		batch.Queue(fn)
	}
	batch.QueueComplete()
	var count int
	for cw := range batch.Results() {
		count += cw.Value().(int)
	}
	if count != 10 {
		b.Fatal("Count Incorrect")
	}
}

View file

@ -0,0 +1,177 @@
package pool
import (
"sync"
"testing"
"time"
. "gopkg.in/go-playground/assert.v1"
)
// NOTES:
// - Run "go test" to run tests
// - Run "gocov test | gocov report" to report on test converage by file
// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called
//
// or
//
// -- may be a good idea to change to output path to somewherelike /tmp
// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
//
// TestPool runs 4 units on a 4-worker pool, checks each completes with a
// nil error/value, and verifies that Close is safe to call twice.
func TestPool(t *testing.T) {
	var res []WorkUnit
	pool := NewLimited(4)
	defer pool.Close()
	newFunc := func(d time.Duration) WorkFunc {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(d)
			return nil, nil
		}
	}
	for i := 0; i < 4; i++ {
		wu := pool.Queue(newFunc(time.Second * 1))
		res = append(res, wu)
	}
	var count int
	for _, wu := range res {
		wu.Wait()
		Equal(t, wu.Error(), nil)
		Equal(t, wu.Value(), nil)
		count++
	}
	Equal(t, count, 4)
	pool.Close() // testing no error occurs as Close will be called twice once defer pool.Close() fires
}
// TestCancel queues 40 units concurrently against the shared limitedGpool,
// cancels mid-flight, and checks that every errored unit carries either
// ErrCancelled or ErrPoolClosed with the expected message. It then Resets
// the pool, verifies queueing works again, verifies per-unit Cancel, and
// finally verifies that queueing after Close yields the closed-pool error.
// The closed flag + RWMutex guard the channel against sends after close.
func TestCancel(t *testing.T) {
	m := new(sync.RWMutex)
	var closed bool
	c := make(chan WorkUnit, 100)
	pool := limitedGpool
	defer pool.Close()
	newFunc := func(d time.Duration) WorkFunc {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(d)
			return 1, nil
		}
	}
	go func(ch chan WorkUnit) {
		for i := 0; i < 40; i++ {
			go func(ch chan WorkUnit) {
				// Skip the send once the channel has been closed.
				m.RLock()
				if closed {
					m.RUnlock()
					return
				}
				ch <- pool.Queue(newFunc(time.Second * 1))
				m.RUnlock()
			}(ch)
		}
	}(c)
	time.Sleep(time.Second * 1)
	pool.Cancel()
	m.Lock()
	closed = true
	close(c)
	m.Unlock()
	var count int
	for wu := range c {
		wu.Wait()
		if wu.Error() != nil {
			// Accept either cancellation or closed-pool errors.
			_, ok := wu.Error().(*ErrCancelled)
			if !ok {
				_, ok = wu.Error().(*ErrPoolClosed)
				if ok {
					Equal(t, wu.Error().Error(), "ERROR: Work Unit added/run after the pool had been closed or cancelled")
				}
			} else {
				Equal(t, wu.Error().Error(), "ERROR: Work Unit Cancelled")
			}
			Equal(t, ok, true)
			continue
		}
		count += wu.Value().(int)
	}
	NotEqual(t, count, 40)
	// reset and test again
	pool.Reset()
	wrk := pool.Queue(newFunc(time.Millisecond * 300))
	wrk.Wait()
	_, ok := wrk.Value().(int)
	Equal(t, ok, true)
	wrk = pool.Queue(newFunc(time.Millisecond * 300))
	time.Sleep(time.Second * 1)
	wrk.Cancel()
	wrk.Wait() // proving we don't get stuck here after cancel
	Equal(t, wrk.Error(), nil)
	pool.Reset() // testing that we can do this and nothing bad will happen as it checks if pool closed
	pool.Close()
	wu := pool.Queue(newFunc(time.Second * 1))
	wu.Wait()
	NotEqual(t, wu.Error(), nil)
	Equal(t, wu.Error().Error(), "ERROR: Work Unit added/run after the pool had been closed or cancelled")
}
// TestPanicRecovery makes one work unit panic and checks the pool converts
// the panic into a recoverable-error message on that unit.
func TestPanicRecovery(t *testing.T) {
	pool := NewLimited(2)
	defer pool.Close()
	newFunc := func(d time.Duration, i int) WorkFunc {
		return func(WorkUnit) (interface{}, error) {
			if i == 1 {
				panic("OMG OMG OMG! something bad happened!")
			}
			time.Sleep(d)
			return 1, nil
		}
	}
	var wrk WorkUnit
	for i := 0; i < 4; i++ {
		time.Sleep(time.Second * 1)
		// Keep a handle only on the unit expected to panic.
		if i == 1 {
			wrk = pool.Queue(newFunc(time.Second*1, i))
			continue
		}
		pool.Queue(newFunc(time.Second*1, i))
	}
	wrk.Wait()
	NotEqual(t, wrk.Error(), nil)
	Equal(t, wrk.Error().Error()[0:90], "ERROR: Work Unit failed due to a recoverable error: 'OMG OMG OMG! something bad happened!'")
}
// TestBadWorkerCount checks that constructing a limited pool with zero
// workers panics with the documented message.
func TestBadWorkerCount(t *testing.T) {
	PanicMatches(t, func() { NewLimited(0) }, "invalid workers '0'")
}

36
vendor/gopkg.in/go-playground/pool.v3/pool_test.go generated vendored Normal file
View file

@ -0,0 +1,36 @@
package pool
import (
"os"
"testing"
)
// NOTES:
// - Run "go test" to run tests
// - Run "gocov test | gocov report" to report on test converage by file
// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called
//
// or
//
// -- may be a good idea to change to output path to somewherelike /tmp
// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
//
// global pool for testing long running pool
var limitedGpool Pool
var unlimitedGpool Pool

// TestMain sets up the shared test pools, runs the suite, and tears the
// pools down before exiting.
func TestMain(m *testing.M) {
	// setup
	limitedGpool = NewLimited(4)
	unlimitedGpool = New()

	code := m.Run()

	// teardown
	// os.Exit does not run deferred calls, so the pools are closed
	// explicitly here; the previous defers (and this teardown point)
	// were unreachable behind os.Exit(m.Run()).
	limitedGpool.Close()
	unlimitedGpool.Close()

	os.Exit(code)
}

View file

@ -0,0 +1,185 @@
package pool
import (
"testing"
"time"
)
// BenchmarkUnlimitedSmallRun runs 10 fixed-cost work units on an unlimited
// pool and sums the results.
// NOTE(review): none of these benchmarks loop over b.N — each performs a
// fixed amount of work regardless of the benchmark iteration count, so
// reported ns/op is not meaningful in the usual sense; confirm intended.
func BenchmarkUnlimitedSmallRun(b *testing.B) {
	res := make([]WorkUnit, 10)
	b.ReportAllocs()
	pool := New()
	defer pool.Close()
	fn := func(wu WorkUnit) (interface{}, error) {
		time.Sleep(time.Millisecond * 500)
		if wu.IsCancelled() {
			return nil, nil
		}
		time.Sleep(time.Millisecond * 500)
		return 1, nil
	}
	for i := 0; i < 10; i++ {
		res[i] = pool.Queue(fn)
	}
	var count int
	for _, cw := range res {
		cw.Wait()
		if cw.Error() == nil {
			count += cw.Value().(int)
		}
	}
	if count != 10 {
		b.Fatal("Count Incorrect")
	}
}
// BenchmarkUnlimitedSmallCancel queues 20 units, cancelling the pool
// partway through queueing, then waits on whatever was accepted.
func BenchmarkUnlimitedSmallCancel(b *testing.B) {
	res := make([]WorkUnit, 0, 20)
	b.ReportAllocs()
	pool := New()
	defer pool.Close()
	newFunc := func(i int) WorkFunc {
		return func(wu WorkUnit) (interface{}, error) {
			time.Sleep(time.Millisecond * 500)
			if wu.IsCancelled() {
				return nil, nil
			}
			time.Sleep(time.Millisecond * 500)
			return i, nil
		}
	}
	for i := 0; i < 20; i++ {
		// Cancel mid-queue to measure cancellation behaviour.
		if i == 6 {
			pool.Cancel()
		}
		res = append(res, pool.Queue(newFunc(i)))
	}
	for _, wrk := range res {
		if wrk == nil {
			continue
		}
		wrk.Wait()
	}
}
// BenchmarkUnlimitedLargeCancel is the same cancellation scenario scaled
// to 1000 queued units.
func BenchmarkUnlimitedLargeCancel(b *testing.B) {
	res := make([]WorkUnit, 0, 1000)
	b.ReportAllocs()
	pool := New()
	defer pool.Close()
	newFunc := func(i int) WorkFunc {
		return func(wu WorkUnit) (interface{}, error) {
			time.Sleep(time.Millisecond * 500)
			if wu.IsCancelled() {
				return nil, nil
			}
			time.Sleep(time.Millisecond * 500)
			return i, nil
		}
	}
	for i := 0; i < 1000; i++ {
		if i == 6 {
			pool.Cancel()
		}
		res = append(res, pool.Queue(newFunc(i)))
	}
	for _, wrk := range res {
		if wrk == nil {
			continue
		}
		wrk.Wait()
	}
}
// BenchmarkUnlimitedLargeRun queues 100 units and verifies all complete.
func BenchmarkUnlimitedLargeRun(b *testing.B) {
	res := make([]WorkUnit, 100)
	b.ReportAllocs()
	pool := New()
	defer pool.Close()
	newFunc := func(i int) WorkFunc {
		return func(wu WorkUnit) (interface{}, error) {
			time.Sleep(time.Millisecond * 500)
			if wu.IsCancelled() {
				return nil, nil
			}
			time.Sleep(time.Millisecond * 500)
			return 1, nil
		}
	}
	for i := 0; i < 100; i++ {
		res[i] = pool.Queue(newFunc(i))
	}
	var count int
	for _, cw := range res {
		cw.Wait()
		count += cw.Value().(int)
	}
	if count != 100 {
		b.Fatalf("Count Incorrect, Expected '100' Got '%d'", count)
	}
}
// BenchmarkUnlimitedBatchSmallRun measures the batch API: 10 units queued,
// results drained from the batch's Results channel.
func BenchmarkUnlimitedBatchSmallRun(b *testing.B) {
	fn := func(wu WorkUnit) (interface{}, error) {
		time.Sleep(time.Millisecond * 500)
		if wu.IsCancelled() {
			return nil, nil
		}
		time.Sleep(time.Millisecond * 500)
		return 1, nil
	}
	pool := New()
	defer pool.Close()
	batch := pool.Batch()
	for i := 0; i < 10; i++ {
		batch.Queue(fn)
	}
	batch.QueueComplete()
	var count int
	for cw := range batch.Results() {
		count += cw.Value().(int)
	}
	if count != 10 {
		b.Fatal("Count Incorrect")
	}
}

View file

@ -0,0 +1,194 @@
package pool
import (
"sync"
"testing"
"time"
. "gopkg.in/go-playground/assert.v1"
)
// NOTES:
// - Run "go test" to run tests
// - Run "gocov test | gocov report" to report on test converage by file
// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called
//
// or
//
// -- may be a good idea to change to output path to somewherelike /tmp
// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
//
func TestUnlimitedPool(t *testing.T) {
var res []WorkUnit
pool := New()
defer pool.Close()
newFunc := func(d time.Duration) WorkFunc {
return func(WorkUnit) (interface{}, error) {
time.Sleep(d)
return nil, nil
}
}
for i := 0; i < 4; i++ {
wu := pool.Queue(newFunc(time.Second * 1))
res = append(res, wu)
}
var count int
for _, wu := range res {
wu.Wait()
Equal(t, wu.Error(), nil)
Equal(t, wu.Value(), nil)
count++
}
Equal(t, count, 4)
pool.Close() // testing no error occurs as Close will be called twice once defer pool.Close() fires
}
// TestUnlimitedCancel drives a pool through a cancel/reset cycle: it queues
// work from many goroutines, cancels mid-flight, verifies the surfaced
// errors, then Resets and confirms the pool is usable again before Closing
// it and checking that post-close queuing fails with the expected error.
func TestUnlimitedCancel(t *testing.T) {
	// m and closed guard the results channel: producers check closed under
	// RLock before sending, so nothing sends on c after it is closed below.
	m := new(sync.RWMutex)
	var closed bool
	c := make(chan WorkUnit, 100)

	// NOTE(review): unlimitedGpool is presumably a package-level pool shared
	// across tests — confirm it is declared elsewhere in this file.
	pool := unlimitedGpool
	defer pool.Close()

	newFunc := func(d time.Duration) WorkFunc {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(d)
			return 1, nil
		}
	}

	// Queue up to 40 one-second units concurrently; each producer re-checks
	// closed so queuing stops once the results channel has been closed.
	go func(ch chan WorkUnit) {
		for i := 0; i < 40; i++ {
			go func(ch chan WorkUnit) {
				m.RLock()
				if !closed {
					ch <- pool.Queue(newFunc(time.Second * 1))
				}
				m.RUnlock()
			}(ch)
		}
	}(c)

	// Let some units start before cancelling.
	time.Sleep(time.Second * 1)
	pool.Cancel()

	m.Lock()
	closed = true
	close(c)
	m.Unlock()

	// Each unit either completed with value 1 or failed with a
	// cancellation/pool-closed error carrying the expected message.
	var count int
	for wu := range c {
		wu.Wait()
		if wu.Error() != nil {
			_, ok := wu.Error().(*ErrCancelled)
			if !ok {
				_, ok = wu.Error().(*ErrPoolClosed)
				if ok {
					Equal(t, wu.Error().Error(), "ERROR: Work Unit added/run after the pool had been closed or cancelled")
				}
			} else {
				Equal(t, wu.Error().Error(), "ERROR: Work Unit Cancelled")
			}
			Equal(t, ok, true)
			continue
		}
		count += wu.Value().(int)
	}
	// Cancellation must have prevented at least one unit from finishing.
	NotEqual(t, count, 40)

	// reset and test again
	pool.Reset()
	wrk := pool.Queue(newFunc(time.Millisecond * 300))
	wrk.Wait()
	_, ok := wrk.Value().(int)
	Equal(t, ok, true)

	// Cancelling an already-completed unit must not block Wait or set an error.
	wrk = pool.Queue(newFunc(time.Millisecond * 300))
	time.Sleep(time.Second * 1)
	wrk.Cancel()
	wrk.Wait() // proving we don't get stuck here after cancel
	Equal(t, wrk.Error(), nil)

	pool.Reset() // testing that we can do this and nothing bad will happen as it checks if pool closed

	// After Close, queued work must surface the pool-closed error.
	pool.Close()
	wu := pool.Queue(newFunc(time.Second * 1))
	wu.Wait()
	NotEqual(t, wu.Error(), nil)
	Equal(t, wu.Error().Error(), "ERROR: Work Unit added/run after the pool had been closed or cancelled")
}
// TestCancelFromWithin queues a long-running unit, cancels the pool while
// the unit is still sleeping, and asserts the unit surfaces the
// cancellation error with no value.
func TestCancelFromWithin(t *testing.T) {
	p := New()
	defer p.Close()

	sleeper := func(d time.Duration) WorkFunc {
		return func(wu WorkUnit) (interface{}, error) {
			time.Sleep(d)
			if wu.IsCancelled() {
				return nil, nil
			}
			return 1, nil
		}
	}

	unit := p.Queue(sleeper(time.Second * 5))
	time.Sleep(time.Second * 2)
	p.Cancel()

	Equal(t, unit.Value() == nil, true)
	NotEqual(t, unit.Error(), nil)
	Equal(t, unit.Error().Error(), "ERROR: Work Unit Cancelled")
}
// TestUnlimitedPanicRecovery verifies that a panicking work unit is
// recovered by the pool and reported as a recoverable-error WorkUnit
// instead of crashing the process.
func TestUnlimitedPanicRecovery(t *testing.T) {
	p := New()
	defer p.Close()

	maker := func(d time.Duration, idx int) WorkFunc {
		return func(WorkUnit) (interface{}, error) {
			// The second unit panics; all others sleep and succeed.
			if idx == 1 {
				panic("OMG OMG OMG! something bad happened!")
			}
			time.Sleep(d)
			return 1, nil
		}
	}

	var panicked WorkUnit
	for idx := 0; idx < 4; idx++ {
		time.Sleep(time.Second * 1)
		if idx == 1 {
			panicked = p.Queue(maker(time.Second*1, idx))
			continue
		}
		p.Queue(maker(time.Second*1, idx))
	}

	panicked.Wait()
	NotEqual(t, panicked.Error(), nil)
	Equal(t, panicked.Error().Error()[0:90], "ERROR: Work Unit failed due to a recoverable error: 'OMG OMG OMG! something bad happened!'")
}

210
vendor/gopkg.in/inf.v0/benchmark_test.go generated vendored Normal file
View file

@ -0,0 +1,210 @@
package inf
import (
"fmt"
"math/big"
"math/rand"
"sync"
"testing"
)
// Benchmark corpus dimensions.
const maxcap = 1024 * 1024 // number of pre-generated operand pairs
const bits = 256           // bit length of random unscaled values
const maxscale = 32        // scales are drawn from (-maxscale, maxscale)

var once sync.Once // guards the one-time corpus initialization below

// Pre-generated random operand pairs shared by all benchmarks.
var decInput [][2]Dec
var intInput [][2]big.Int
// initBench populates decInput and intInput with deterministic
// pseudo-random operands (fixed seed 0) so every benchmark run measures
// against the same corpus.
var initBench = func() {
	decInput = make([][2]Dec, maxcap)
	intInput = make([][2]big.Int, maxcap)
	max := new(big.Int).Lsh(big.NewInt(1), bits) // 2^bits, exclusive upper bound
	r := rand.New(rand.NewSource(0))             // fixed seed for reproducibility
	for i := 0; i < cap(decInput); i++ {
		// Random unscaled value in [0, 2^bits) with a scale drawn from
		// (-maxscale, maxscale).
		decInput[i][0].SetUnscaledBig(new(big.Int).Rand(r, max)).
			SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale)))
		decInput[i][1].SetUnscaledBig(new(big.Int).Rand(r, max)).
			SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale)))
	}
	for i := 0; i < cap(intInput); i++ {
		intInput[i][0].Rand(r, max)
		intInput[i][1].Rand(r, max)
	}
}
// doBenchmarkDec1 runs f once per benchmark iteration over the
// pre-generated Dec corpus, cycling through inputs modulo maxcap.
func doBenchmarkDec1(b *testing.B, f func(z *Dec)) {
	once.Do(initBench)
	// ResetTimer zeroes the elapsed time but does not stop the timer, so the
	// original's immediate b.StartTimer() call was a redundant no-op and has
	// been removed.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		f(&decInput[i%maxcap][0])
	}
}
// doBenchmarkDec2 runs f once per benchmark iteration over pre-generated
// Dec operand pairs, cycling through inputs modulo maxcap.
func doBenchmarkDec2(b *testing.B, f func(x, y *Dec)) {
	once.Do(initBench)
	// ResetTimer zeroes the elapsed time but does not stop the timer, so the
	// original's immediate b.StartTimer() call was a redundant no-op and has
	// been removed.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		f(&decInput[i%maxcap][0], &decInput[i%maxcap][1])
	}
}
// doBenchmarkInt1 runs f once per benchmark iteration over the
// pre-generated big.Int corpus (the big.Int baseline for the Dec
// benchmarks), cycling through inputs modulo maxcap.
func doBenchmarkInt1(b *testing.B, f func(z *big.Int)) {
	once.Do(initBench)
	// ResetTimer zeroes the elapsed time but does not stop the timer, so the
	// original's immediate b.StartTimer() call was a redundant no-op and has
	// been removed.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		f(&intInput[i%maxcap][0])
	}
}
// doBenchmarkInt2 runs f once per benchmark iteration over pre-generated
// big.Int operand pairs, cycling through inputs modulo maxcap.
func doBenchmarkInt2(b *testing.B, f func(x, y *big.Int)) {
	once.Do(initBench)
	// ResetTimer zeroes the elapsed time but does not stop the timer, so the
	// original's immediate b.StartTimer() call was a redundant no-op and has
	// been removed.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		f(&intInput[i%maxcap][0], &intInput[i%maxcap][1])
	}
}
// Benchmark_Dec_String measures Dec-to-string formatting.
func Benchmark_Dec_String(b *testing.B) {
	doBenchmarkDec1(b, func(x *Dec) {
		x.String()
	})
}

// Benchmark_Dec_StringScan measures a full format-then-parse round trip.
func Benchmark_Dec_StringScan(b *testing.B) {
	doBenchmarkDec1(b, func(x *Dec) {
		s := x.String()
		d := new(Dec)
		fmt.Sscan(s, d)
	})
}

// Benchmark_Dec_GobEncode measures gob encoding of a Dec.
func Benchmark_Dec_GobEncode(b *testing.B) {
	doBenchmarkDec1(b, func(x *Dec) {
		x.GobEncode()
	})
}

// Benchmark_Dec_GobEnDecode measures a gob encode-then-decode round trip.
func Benchmark_Dec_GobEnDecode(b *testing.B) {
	doBenchmarkDec1(b, func(x *Dec) {
		g, _ := x.GobEncode()
		new(Dec).GobDecode(g)
	})
}

// Benchmark_Dec_Add measures addition of operands forced to a common scale
// (the scale is restored afterwards so the corpus stays unchanged).
func Benchmark_Dec_Add(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		ys := y.Scale()
		y.SetScale(x.Scale())
		_ = new(Dec).Add(x, y)
		y.SetScale(ys)
	})
}

// Benchmark_Dec_AddMixed measures addition of operands with differing scales.
func Benchmark_Dec_AddMixed(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		_ = new(Dec).Add(x, y)
	})
}

// Benchmark_Dec_Sub measures subtraction at a common scale.
func Benchmark_Dec_Sub(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		ys := y.Scale()
		y.SetScale(x.Scale())
		_ = new(Dec).Sub(x, y)
		y.SetScale(ys)
	})
}

// Benchmark_Dec_SubMixed measures subtraction with differing scales.
func Benchmark_Dec_SubMixed(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		_ = new(Dec).Sub(x, y)
	})
}

// Benchmark_Dec_Mul measures multiplication.
func Benchmark_Dec_Mul(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		_ = new(Dec).Mul(x, y)
	})
}

// Benchmark_Dec_Mul_QuoExact measures multiply followed by an exact
// division by the same factor.
func Benchmark_Dec_Mul_QuoExact(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		v := new(Dec).Mul(x, y)
		_ = new(Dec).QuoExact(v, y)
	})
}

// Benchmark_Dec_QuoRound_Fixed_Down measures division rounded to scale 0
// toward zero.
func Benchmark_Dec_QuoRound_Fixed_Down(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		_ = new(Dec).QuoRound(x, y, 0, RoundDown)
	})
}

// Benchmark_Dec_QuoRound_Fixed_HalfUp measures division rounded to scale 0
// half-up.
func Benchmark_Dec_QuoRound_Fixed_HalfUp(b *testing.B) {
	doBenchmarkDec2(b, func(x, y *Dec) {
		_ = new(Dec).QuoRound(x, y, 0, RoundHalfUp)
	})
}
// The Benchmark_Int_* functions below mirror the Dec benchmarks using raw
// big.Int operands, providing a baseline for the Dec overhead.

// Benchmark_Int_String measures big.Int-to-string formatting.
func Benchmark_Int_String(b *testing.B) {
	doBenchmarkInt1(b, func(x *big.Int) {
		x.String()
	})
}

// Benchmark_Int_StringScan measures a format-then-parse round trip.
func Benchmark_Int_StringScan(b *testing.B) {
	doBenchmarkInt1(b, func(x *big.Int) {
		s := x.String()
		d := new(big.Int)
		fmt.Sscan(s, d)
	})
}

// Benchmark_Int_GobEncode measures gob encoding.
func Benchmark_Int_GobEncode(b *testing.B) {
	doBenchmarkInt1(b, func(x *big.Int) {
		x.GobEncode()
	})
}

// Benchmark_Int_GobEnDecode measures a gob encode-then-decode round trip.
func Benchmark_Int_GobEnDecode(b *testing.B) {
	doBenchmarkInt1(b, func(x *big.Int) {
		g, _ := x.GobEncode()
		new(big.Int).GobDecode(g)
	})
}

// Benchmark_Int_Add measures addition.
func Benchmark_Int_Add(b *testing.B) {
	doBenchmarkInt2(b, func(x, y *big.Int) {
		_ = new(big.Int).Add(x, y)
	})
}

// Benchmark_Int_Sub measures subtraction.
func Benchmark_Int_Sub(b *testing.B) {
	doBenchmarkInt2(b, func(x, y *big.Int) {
		_ = new(big.Int).Sub(x, y)
	})
}

// Benchmark_Int_Mul measures multiplication.
func Benchmark_Int_Mul(b *testing.B) {
	doBenchmarkInt2(b, func(x, y *big.Int) {
		_ = new(big.Int).Mul(x, y)
	})
}

// Benchmark_Int_Quo measures truncated division.
func Benchmark_Int_Quo(b *testing.B) {
	doBenchmarkInt2(b, func(x, y *big.Int) {
		_ = new(big.Int).Quo(x, y)
	})
}

// Benchmark_Int_QuoRem measures division with remainder.
func Benchmark_Int_QuoRem(b *testing.B) {
	doBenchmarkInt2(b, func(x, y *big.Int) {
		_, _ = new(big.Int).QuoRem(x, y, new(big.Int))
	})
}

2
vendor/gopkg.in/inf.v0/dec.go generated vendored
View file

@ -20,7 +20,7 @@
// + combined operations such as AddRound/MulAdd etc
// + exchanging data in decimal32/64/128 formats
//
package inf
package inf // import "gopkg.in/inf.v0"
// TODO:
// - avoid excessive deep copying (quo and rounders)

33
vendor/gopkg.in/inf.v0/dec_go1_2_test.go generated vendored Normal file
View file

@ -0,0 +1,33 @@
// +build go1.2
package inf
import (
"encoding"
"encoding/json"
"testing"
)
// Compile-time assertions that Dec implements the text-encoding interfaces
// introduced in Go 1.2.
var _ encoding.TextMarshaler = new(Dec)
var _ encoding.TextUnmarshaler = new(Dec)

// Obj wraps a Dec pointer field so JSON round-tripping of an embedded
// decimal can be tested.
type Obj struct {
	Val *Dec
}
// TestDecJsonMarshalUnmarshal round-trips a Dec through JSON and verifies
// both the scale and the unscaled value survive.
func TestDecJsonMarshalUnmarshal(t *testing.T) {
	in := Obj{Val: NewDec(123, 2)}

	js, err := json.Marshal(in)
	if err != nil {
		t.Fatalf("json.Marshal(%v): got %v, want ok", in, err)
	}

	out := &Obj{}
	if err = json.Unmarshal(js, out); err != nil {
		t.Fatalf("json.Unmarshal(%#q): got %v, want ok", js, err)
	}

	sameScale := in.Val.Scale() == out.Val.Scale()
	sameValue := in.Val.UnscaledBig().Cmp(out.Val.UnscaledBig()) == 0
	if !sameScale || !sameValue {
		t.Fatalf("json.Unmarshal(json.Marshal(%v)): want %v, got %v", in, in, out)
	}
}

40
vendor/gopkg.in/inf.v0/dec_internal_test.go generated vendored Normal file
View file

@ -0,0 +1,40 @@
package inf
import (
"math/big"
"testing"
)
// decQuoRemZZZ tabulates expected results of the internal quoRem helper:
// z is the expected quotient of x/y, r the expected remainder expressed as
// a rational, and srA/srB the expected signs of the two remainder parts.
var decQuoRemZZZ = []struct {
	z, x, y  *Dec
	r        *big.Rat
	srA, srB int
}{
	// basic examples
	{NewDec(1, 0), NewDec(2, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1},
	{NewDec(15, 1), NewDec(3, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1},
	{NewDec(1, 1), NewDec(1, 0), NewDec(10, 0), big.NewRat(0, 1), 0, 1},
	{NewDec(0, 0), NewDec(2, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1},
	{NewDec(0, 0), NewDec(2, 0), NewDec(6, 0), big.NewRat(1, 3), 1, 1},
	{NewDec(1, 1), NewDec(2, 0), NewDec(12, 0), big.NewRat(2, 3), 1, 1},
	// examples from the Go Language Specification
	{NewDec(1, 0), NewDec(5, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1},
	{NewDec(-1, 0), NewDec(-5, 0), NewDec(3, 0), big.NewRat(-2, 3), -1, 1},
	{NewDec(-1, 0), NewDec(5, 0), NewDec(-3, 0), big.NewRat(-2, 3), 1, -1},
	{NewDec(1, 0), NewDec(-5, 0), NewDec(-3, 0), big.NewRat(2, 3), -1, -1},
}
// TestDecQuoRem exercises the unexported quoRem against the table above,
// checking the quotient, the rational remainder rA/rB, and the signs of the
// remainder parts.
func TestDecQuoRem(t *testing.T) {
	for i, a := range decQuoRemZZZ {
		z, rA, rB := new(Dec), new(big.Int), new(big.Int)
		// scaleQuoExact presumably selects the scale at which x/y is exact,
		// when one exists — confirm against the scaler implementation.
		s := scaleQuoExact{}.Scale(a.x, a.y)
		z.quoRem(a.x, a.y, s, true, rA, rB)
		if a.z.Cmp(z) != 0 || a.r.Cmp(new(big.Rat).SetFrac(rA, rB)) != 0 {
			t.Errorf("#%d QuoRemZZZ got %v, %v, %v; expected %v, %v", i, z, rA, rB, a.z, a.r)
		}
		if a.srA != rA.Sign() || a.srB != rB.Sign() {
			t.Errorf("#%d QuoRemZZZ wrong signs, got %v, %v; expected %v, %v", i, rA.Sign(), rB.Sign(), a.srA, a.srB)
		}
	}
}

379
vendor/gopkg.in/inf.v0/dec_test.go generated vendored Normal file
View file

@ -0,0 +1,379 @@
package inf_test
import (
"bytes"
"encoding/gob"
"fmt"
"math/big"
"strings"
"testing"
"gopkg.in/inf.v0"
)
// decFunZZ is the shape of a binary Dec operation (Add/Sub/Mul) that stores
// its result in z.
type decFunZZ func(z, x, y *inf.Dec) *inf.Dec

// decArgZZ is one table row: z is the expected result of applying an
// operation to x and y.
type decArgZZ struct {
	z, x, y *inf.Dec
}
// decSumZZ lists addition cases: z = x + y.
var decSumZZ = []decArgZZ{
	{inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)},
	{inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)},
	{inf.NewDec(1111111110, 0), inf.NewDec(123456789, 0), inf.NewDec(987654321, 0)},
	{inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(0, 0)},
	{inf.NewDec(864197532, 0), inf.NewDec(-123456789, 0), inf.NewDec(987654321, 0)},
	{inf.NewDec(-1111111110, 0), inf.NewDec(-123456789, 0), inf.NewDec(-987654321, 0)},
	{inf.NewDec(12, 2), inf.NewDec(1, 1), inf.NewDec(2, 2)},
}

// decProdZZ lists multiplication cases: z = x * y.
var decProdZZ = []decArgZZ{
	{inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)},
	{inf.NewDec(0, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)},
	{inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(1, 0)},
	{inf.NewDec(-991*991, 0), inf.NewDec(991, 0), inf.NewDec(-991, 0)},
	{inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)},
	{inf.NewDec(2, -3), inf.NewDec(1, -1), inf.NewDec(2, -2)},
	{inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)},
}
// TestDecSignZ checks that Sign agrees with a three-way comparison against
// zero for every expected value in the shared sum table.
func TestDecSignZ(t *testing.T) {
	var zero inf.Dec
	for _, tc := range decSumZZ {
		got := tc.z.Sign()
		want := tc.z.Cmp(&zero)
		if got != want {
			t.Errorf("got %d; want %d for z = %v", got, want, tc.z)
		}
	}
}
// TestDecAbsZ checks Abs against a reference computed by negating negative
// values via subtraction from zero.
func TestDecAbsZ(t *testing.T) {
	var zero inf.Dec
	for _, tc := range decSumZZ {
		var got inf.Dec
		got.Abs(tc.z)

		var want inf.Dec
		want.Set(tc.z)
		if want.Cmp(&zero) < 0 {
			want.Sub(&zero, &want)
		}

		if got.Cmp(&want) != 0 {
			t.Errorf("got z = %v; want %v", got, want)
		}
	}
}
// testDecFunZZ applies f to a's operands and reports any mismatch against
// the expected result a.z, prefixing the failure with msg.
func testDecFunZZ(t *testing.T, msg string, f decFunZZ, a decArgZZ) {
	var got inf.Dec
	f(&got, a.x, a.y)
	if got.Cmp(a.z) != 0 {
		t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, &got, a.z)
	}
}
// TestDecSumZZ verifies Add (with commuted operands) and the derived Sub
// identities over the shared sum table.
func TestDecSumZZ(t *testing.T) {
	add := func(z, x, y *inf.Dec) *inf.Dec { return z.Add(x, y) }
	sub := func(z, x, y *inf.Dec) *inf.Dec { return z.Sub(x, y) }
	for _, a := range decSumZZ {
		testDecFunZZ(t, "AddZZ", add, a)
		testDecFunZZ(t, "AddZZ symmetric", add, decArgZZ{a.z, a.y, a.x})
		testDecFunZZ(t, "SubZZ", sub, decArgZZ{a.x, a.z, a.y})
		testDecFunZZ(t, "SubZZ symmetric", sub, decArgZZ{a.y, a.z, a.x})
	}
}
// TestDecProdZZ verifies Mul, including operand commutation, over the
// shared product table.
func TestDecProdZZ(t *testing.T) {
	mul := func(z, x, y *inf.Dec) *inf.Dec { return z.Mul(x, y) }
	for _, a := range decProdZZ {
		testDecFunZZ(t, "MulZZ", mul, a)
		testDecFunZZ(t, "MulZZ symmetric", mul, decArgZZ{a.z, a.y, a.x})
	}
}
// decUnscaledTests covers Unscaled's int64 range check: values within
// int64 report ok, values just outside (or far outside) report !ok.
var decUnscaledTests = []struct {
	d  *inf.Dec
	u  int64 // ignored when ok == false
	ok bool
}{
	{new(inf.Dec), 0, true},
	{inf.NewDec(-1<<63, 0), -1 << 63, true},
	{inf.NewDec(-(-1<<63 + 1), 0), -(-1<<63 + 1), true},
	{new(inf.Dec).Neg(inf.NewDec(-1<<63, 0)), 0, false},
	{new(inf.Dec).Sub(inf.NewDec(-1<<63, 0), inf.NewDec(1, 0)), 0, false},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), 0, false},
}
// TestDecUnscaled checks both the ok flag and, when ok, the returned int64
// against the expected table.
func TestDecUnscaled(t *testing.T) {
	for i, tc := range decUnscaledTests {
		u, ok := tc.d.Unscaled()
		switch {
		case ok != tc.ok:
			t.Errorf("#%d Unscaled: got %v, expected %v", i, ok, tc.ok)
		case ok && u != tc.u:
			t.Errorf("#%d Unscaled: got %v, expected %v", i, u, tc.u)
		}
	}
}
// decRoundTests covers Round with RoundHalfUp at scales both tighter and
// looser than the input's, including very large (2^64) inputs at negative
// scales.
var decRoundTests = [...]struct {
	in  *inf.Dec
	s   inf.Scale
	r   inf.Rounder
	exp *inf.Dec
}{
	{inf.NewDec(123424999999999993, 15), 2, inf.RoundHalfUp, inf.NewDec(12342, 2)},
	{inf.NewDec(123425000000000001, 15), 2, inf.RoundHalfUp, inf.NewDec(12343, 2)},
	{inf.NewDec(123424999999999993, 15), 15, inf.RoundHalfUp, inf.NewDec(123424999999999993, 15)},
	{inf.NewDec(123424999999999993, 15), 16, inf.RoundHalfUp, inf.NewDec(1234249999999999930, 16)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -1, inf.RoundHalfUp, inf.NewDec(1844674407370955162, -1)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -2, inf.RoundHalfUp, inf.NewDec(184467440737095516, -2)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -3, inf.RoundHalfUp, inf.NewDec(18446744073709552, -3)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -4, inf.RoundHalfUp, inf.NewDec(1844674407370955, -4)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -5, inf.RoundHalfUp, inf.NewDec(184467440737096, -5)},
	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -6, inf.RoundHalfUp, inf.NewDec(18446744073710, -6)},
}
// TestDecRound runs each rounding case and compares the result by value.
func TestDecRound(t *testing.T) {
	for i, tc := range decRoundTests {
		got := new(inf.Dec).Round(tc.in, tc.s, tc.r)
		if tc.exp.Cmp(got) != 0 {
			t.Errorf("#%d Round got %v; expected %v", i, got, tc.exp)
		}
	}
}
// decStringTests is the shared table for formatting (String), parsing
// (SetString), and fmt scanning. out is the canonical formatted form of
// (val, scale); ok/scanOk record whether SetString / fmt.Sscan accept in.
// Rows with in == "ignored" only exercise formatting (negative scales are
// not parseable).
var decStringTests = []struct {
	in     string
	out    string
	val    int64
	scale  inf.Scale // skip SetString if negative
	ok     bool
	scanOk bool
}{
	{in: "", ok: false, scanOk: false},
	{in: "a", ok: false, scanOk: false},
	{in: "z", ok: false, scanOk: false},
	{in: "+", ok: false, scanOk: false},
	{in: "-", ok: false, scanOk: false},
	{in: "g", ok: false, scanOk: false},
	{in: ".", ok: false, scanOk: false},
	{in: ".-0", ok: false, scanOk: false},
	{in: ".+0", ok: false, scanOk: false},
	// Scannable but not SetStringable
	{"0b", "ignored", 0, 0, false, true},
	{"0x", "ignored", 0, 0, false, true},
	{"0xg", "ignored", 0, 0, false, true},
	{"0.0g", "ignored", 0, 1, false, true},
	// examples from godoc for Dec
	{"0", "0", 0, 0, true, true},
	{"0.00", "0.00", 0, 2, true, true},
	{"ignored", "0", 0, -2, true, false},
	{"1", "1", 1, 0, true, true},
	{"1.00", "1.00", 100, 2, true, true},
	{"10", "10", 10, 0, true, true},
	{"ignored", "10", 1, -1, true, false},
	// other tests
	{"+0", "0", 0, 0, true, true},
	{"-0", "0", 0, 0, true, true},
	{"0.0", "0.0", 0, 1, true, true},
	{"0.1", "0.1", 1, 1, true, true},
	{"0.", "0", 0, 0, true, true},
	{"-10", "-10", -1, -1, true, true},
	{"-1", "-1", -1, 0, true, true},
	{"-0.1", "-0.1", -1, 1, true, true},
	{"-0.01", "-0.01", -1, 2, true, true},
	{"+0.", "0", 0, 0, true, true},
	{"-0.", "0", 0, 0, true, true},
	{".0", "0.0", 0, 1, true, true},
	{"+.0", "0.0", 0, 1, true, true},
	{"-.0", "0.0", 0, 1, true, true},
	{"0.0000000000", "0.0000000000", 0, 10, true, true},
	{"0.0000000001", "0.0000000001", 1, 10, true, true},
	{"-0.0000000000", "0.0000000000", 0, 10, true, true},
	{"-0.0000000001", "-0.0000000001", -1, 10, true, true},
	{"-10", "-10", -10, 0, true, true},
	{"+10", "10", 10, 0, true, true},
	{"00", "0", 0, 0, true, true},
	{"023", "23", 23, 0, true, true},       // decimal, not octal
	{"-02.3", "-2.3", -23, 1, true, true}, // decimal, not octal
}
// TestDecGetString formats each (val, scale) pair via both String and
// fmt's %d verb and compares against the canonical output.
func TestDecGetString(t *testing.T) {
	d := new(inf.Dec)
	for i, tc := range decStringTests {
		if !tc.ok {
			continue
		}
		d.SetUnscaled(tc.val)
		d.SetScale(tc.scale)

		if got := d.String(); got != tc.out {
			t.Errorf("#%da got %s; want %s", i, got, tc.out)
		}
		if got := fmt.Sprintf("%d", d); got != tc.out {
			t.Errorf("#%db got %s; want %s", i, got, tc.out)
		}
	}
}
// TestDecSetString parses each input with SetString on both a fresh Dec and
// a pre-populated one (to catch stale-state bugs), checking the ok flag,
// the nil-on-failure contract, and the parsed value.
func TestDecSetString(t *testing.T) {
	tmp := new(inf.Dec)
	for i, test := range decStringTests {
		if test.scale < 0 {
			// SetString only supports scale >= 0
			continue
		}
		// initialize to a non-zero value so that issues with parsing
		// 0 are detected
		tmp.Set(inf.NewDec(1234567890, 123))
		n1, ok1 := new(inf.Dec).SetString(test.in)
		n2, ok2 := tmp.SetString(test.in)
		expected := inf.NewDec(test.val, test.scale)
		if ok1 != test.ok || ok2 != test.ok {
			t.Errorf("#%d (input '%s') ok incorrect (should be %t)", i, test.in, test.ok)
			continue
		}
		// On failure SetString must return nil.
		if !ok1 {
			if n1 != nil {
				t.Errorf("#%d (input '%s') n1 != nil", i, test.in)
			}
			continue
		}
		if !ok2 {
			if n2 != nil {
				t.Errorf("#%d (input '%s') n2 != nil", i, test.in)
			}
			continue
		}
		if n1.Cmp(expected) != 0 {
			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
		}
		if n2.Cmp(expected) != 0 {
			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
		}
	}
}
// TestDecScan parses each input via fmt.Sscan into both a fresh Dec and a
// pre-populated one, checking error behavior for unscannable inputs and
// the parsed value for scannable ones.
func TestDecScan(t *testing.T) {
	tmp := new(inf.Dec)
	for i, test := range decStringTests {
		if test.scale < 0 {
			// SetString only supports scale >= 0
			continue
		}
		// initialize to a non-zero value so that issues with parsing
		// 0 are detected
		tmp.Set(inf.NewDec(1234567890, 123))
		n1, n2 := new(inf.Dec), tmp
		nn1, err1 := fmt.Sscan(test.in, n1)
		nn2, err2 := fmt.Sscan(test.in, n2)
		if !test.scanOk {
			// Unscannable inputs must produce an error from both scans.
			if err1 == nil || err2 == nil {
				t.Errorf("#%d (input '%s') ok incorrect, should be %t", i, test.in, test.scanOk)
			}
			continue
		}
		expected := inf.NewDec(test.val, test.scale)
		if nn1 != 1 || err1 != nil || nn2 != 1 || err2 != nil {
			t.Errorf("#%d (input '%s') error %d %v, %d %v", i, test.in, nn1, err1, nn2, err2)
			continue
		}
		if n1.Cmp(expected) != 0 {
			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
		}
		if n2.Cmp(expected) != 0 {
			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
		}
	}
}
// decScanNextTests records, for each input, whether a Dec scan succeeds and
// which rune the scanner leaves unconsumed in the stream afterwards
// (0 means the stream is exhausted).
var decScanNextTests = []struct {
	in   string
	ok   bool
	next rune
}{
	{"", false, 0},
	{"a", false, 'a'},
	{"z", false, 'z'},
	{"+", false, 0},
	{"-", false, 0},
	{"g", false, 'g'},
	{".", false, 0},
	{".-0", false, '-'},
	{".+0", false, '+'},
	{"0b", true, 'b'},
	{"0x", true, 'x'},
	{"0xg", true, 'x'},
	{"0.0g", true, 'g'},
}
// TestDecScanNext verifies that scanning a Dec consumes exactly the decimal
// prefix of the input: after the scan, the next rune read from the reader
// must match the table.
func TestDecScanNext(t *testing.T) {
	for i, test := range decScanNextTests {
		rdr := strings.NewReader(test.in)
		n1 := new(inf.Dec)
		nn1, _ := fmt.Fscan(rdr, n1)
		if (test.ok && nn1 == 0) || (!test.ok && nn1 > 0) {
			t.Errorf("#%d (input '%s') ok incorrect should be %t", i, test.in, test.ok)
			continue
		}
		// Read the first unconsumed rune; r stays 0 if the stream is empty.
		r := rune(0)
		nn2, err := fmt.Fscanf(rdr, "%c", &r)
		if test.next != r {
			t.Errorf("#%d (input '%s') next incorrect, got %c should be %c, %d, %v", i, test.in, r, test.next, nn2, err)
		}
	}
}
// decGobEncodingTests lists decimal strings (small through very large) used
// as gob round-trip inputs; each is also tested negated and across a range
// of scales.
var decGobEncodingTests = []string{
	"0",
	"1",
	"2",
	"10",
	"42",
	"1234567890",
	"298472983472983471903246121093472394872319615612417471234712061",
}
// TestDecGobEncoding gob round-trips each test value with both signs and
// scales from -5 to 5, verifying the decoded Dec compares equal to the
// encoded one.
func TestDecGobEncoding(t *testing.T) {
	var medium bytes.Buffer
	enc := gob.NewEncoder(&medium)
	dec := gob.NewDecoder(&medium)
	for i, test := range decGobEncodingTests {
		for j := 0; j < 2; j++ {
			for k := inf.Scale(-5); k <= 5; k++ {
				medium.Reset() // empty buffer for each test case (in case of failures)
				stest := test
				if j != 0 {
					// negative numbers
					stest = "-" + test
				}
				var tx inf.Dec
				tx.SetString(stest)
				tx.SetScale(k) // test with positive, negative, and zero scale
				if err := enc.Encode(&tx); err != nil {
					t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err)
				}
				var rx inf.Dec
				if err := dec.Decode(&rx); err != nil {
					t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err)
				}
				if rx.Cmp(&tx) != 0 {
					t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx)
				}
			}
		}
	}
}

62
vendor/gopkg.in/inf.v0/example_test.go generated vendored Normal file
View file

@ -0,0 +1,62 @@
package inf_test
import (
"fmt"
"log"
)
import "gopkg.in/inf.v0"
// ExampleDec_SetString demonstrates parsing: the leading zero is dropped
// while the trailing zero (significant scale) is preserved.
func ExampleDec_SetString() {
	d := new(inf.Dec)
	d.SetString("012345.67890") // decimal; leading 0 ignored; trailing 0 kept
	fmt.Println(d)
	// Output: 12345.67890
}
// ExampleDec_Scan demonstrates scanning a Dec via the fmt package.
func ExampleDec_Scan() {
	// The Scan function is rarely used directly;
	// the fmt package recognizes it as an implementation of fmt.Scanner.
	d := new(inf.Dec)
	_, err := fmt.Sscan("184467440.73709551617", d)
	if err != nil {
		log.Println("error scanning value:", err)
	} else {
		fmt.Println(d)
	}
	// Output: 184467440.73709551617
}
// ExampleDec_QuoRound_scale2RoundDown divides an infinite decimal to a
// fixed scale, truncating toward zero.
func ExampleDec_QuoRound_scale2RoundDown() {
	// 10 / 3 is an infinite decimal; it has no exact Dec representation
	x, y := inf.NewDec(10, 0), inf.NewDec(3, 0)
	// use 2 digits beyond the decimal point, round towards 0
	z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundDown)
	fmt.Println(z)
	// Output: 3.33
}
// ExampleDec_QuoRound_scale2RoundCeil divides to a fixed scale, rounding
// toward positive infinity.
func ExampleDec_QuoRound_scale2RoundCeil() {
	// -42 / 400 is a finite decimal with 3 digits beyond the decimal point
	x, y := inf.NewDec(-42, 0), inf.NewDec(400, 0)
	// use 2 digits beyond decimal point, round towards positive infinity
	z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundCeil)
	fmt.Println(z)
	// Output: -0.10
}
// ExampleDec_QuoExact_ok shows QuoExact succeeding on a division with a
// finite decimal result.
func ExampleDec_QuoExact_ok() {
	// 1 / 25 is a finite decimal; it has exact Dec representation
	x, y := inf.NewDec(1, 0), inf.NewDec(25, 0)
	z := new(inf.Dec).QuoExact(x, y)
	fmt.Println(z)
	// Output: 0.04
}
// ExampleDec_QuoExact_fail shows QuoExact returning nil when the quotient
// has no exact decimal representation.
func ExampleDec_QuoExact_fail() {
	// 1 / 3 is an infinite decimal; it has no exact Dec representation
	x, y := inf.NewDec(1, 0), inf.NewDec(3, 0)
	z := new(inf.Dec).QuoExact(x, y)
	fmt.Println(z)
	// Output: <nil>
}

72
vendor/gopkg.in/inf.v0/rounder_example_test.go generated vendored Normal file
View file

@ -0,0 +1,72 @@
package inf_test
import (
"fmt"
"os"
"text/tabwriter"
"gopkg.in/inf.v0"
)
// This example displays the results of Dec.Round with each of the Rounders.
//
func ExampleRounder() {
	// Inputs straddle zero so the table shows each Rounder's behavior on
	// negative, zero, and positive values at the .x2/.x5/.x8 midpoints.
	var vals = []struct {
		x string
		s inf.Scale
	}{
		{"-0.18", 1}, {"-0.15", 1}, {"-0.12", 1}, {"-0.10", 1},
		{"-0.08", 1}, {"-0.05", 1}, {"-0.02", 1}, {"0.00", 1},
		{"0.02", 1}, {"0.05", 1}, {"0.08", 1}, {"0.10", 1},
		{"0.12", 1}, {"0.15", 1}, {"0.18", 1},
	}

	var rounders = []struct {
		name    string
		rounder inf.Rounder
	}{
		{"RoundDown", inf.RoundDown}, {"RoundUp", inf.RoundUp},
		{"RoundCeil", inf.RoundCeil}, {"RoundFloor", inf.RoundFloor},
		{"RoundHalfDown", inf.RoundHalfDown}, {"RoundHalfUp", inf.RoundHalfUp},
		{"RoundHalfEven", inf.RoundHalfEven}, {"RoundExact", inf.RoundExact},
	}

	// The trailing \n is intentional: with Println's own newline it yields
	// the blank line expected in the Output block below.
	fmt.Println("The results of new(inf.Dec).Round(x, s, inf.RoundXXX):\n")
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight)
	// Header row: column names strip the "Round" prefix.
	fmt.Fprint(w, "x\ts\t|\t")
	for _, r := range rounders {
		fmt.Fprintf(w, "%s\t", r.name[5:])
	}
	fmt.Fprintln(w)
	for _, v := range vals {
		fmt.Fprintf(w, "%s\t%d\t|\t", v.x, v.s)
		for _, r := range rounders {
			x, _ := new(inf.Dec).SetString(v.x)
			z := new(inf.Dec).Round(x, v.s, r.rounder)
			fmt.Fprintf(w, "%d\t", z)
		}
		fmt.Fprintln(w)
	}
	w.Flush()

	// Output:
	// The results of new(inf.Dec).Round(x, s, inf.RoundXXX):
	//
	// x s | Down Up Ceil Floor HalfDown HalfUp HalfEven Exact
	// -0.18 1 | -0.1 -0.2 -0.1 -0.2 -0.2 -0.2 -0.2 <nil>
	// -0.15 1 | -0.1 -0.2 -0.1 -0.2 -0.1 -0.2 -0.2 <nil>
	// -0.12 1 | -0.1 -0.2 -0.1 -0.2 -0.1 -0.1 -0.1 <nil>
	// -0.10 1 | -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 -0.1
	// -0.08 1 | 0.0 -0.1 0.0 -0.1 -0.1 -0.1 -0.1 <nil>
	// -0.05 1 | 0.0 -0.1 0.0 -0.1 0.0 -0.1 0.0 <nil>
	// -0.02 1 | 0.0 -0.1 0.0 -0.1 0.0 0.0 0.0 <nil>
	// 0.00 1 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
	// 0.02 1 | 0.0 0.1 0.1 0.0 0.0 0.0 0.0 <nil>
	// 0.05 1 | 0.0 0.1 0.1 0.0 0.0 0.1 0.0 <nil>
	// 0.08 1 | 0.0 0.1 0.1 0.0 0.1 0.1 0.1 <nil>
	// 0.10 1 | 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1
	// 0.12 1 | 0.1 0.2 0.2 0.1 0.1 0.1 0.1 <nil>
	// 0.15 1 | 0.1 0.2 0.2 0.1 0.1 0.2 0.2 <nil>
	// 0.18 1 | 0.1 0.2 0.2 0.1 0.2 0.2 0.2 <nil>
}

109
vendor/gopkg.in/inf.v0/rounder_test.go generated vendored Normal file
View file

@ -0,0 +1,109 @@
package inf_test
import (
"math/big"
"testing"
"gopkg.in/inf.v0"
)
// decRounderInputs lists (quotient, remainder-numerator, remainder-
// denominator) triples fed to every Rounder; the remainder rA/rB encodes
// the truncated fraction left over by the division.
var decRounderInputs = [...]struct {
	quo    *inf.Dec
	rA, rB *big.Int
}{
	// examples from go language spec
	{inf.NewDec(1, 0), big.NewInt(2), big.NewInt(3)},   // 5 / 3
	{inf.NewDec(-1, 0), big.NewInt(-2), big.NewInt(3)}, // -5 / 3
	{inf.NewDec(-1, 0), big.NewInt(2), big.NewInt(-3)}, // 5 / -3
	{inf.NewDec(1, 0), big.NewInt(-2), big.NewInt(-3)}, // -5 / -3
	// examples from godoc
	{inf.NewDec(-1, 1), big.NewInt(-8), big.NewInt(10)},
	{inf.NewDec(-1, 1), big.NewInt(-5), big.NewInt(10)},
	{inf.NewDec(-1, 1), big.NewInt(-2), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(-8), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(-5), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(-2), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(0), big.NewInt(1)},
	{inf.NewDec(0, 1), big.NewInt(2), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(5), big.NewInt(10)},
	{inf.NewDec(0, 1), big.NewInt(8), big.NewInt(10)},
	{inf.NewDec(1, 1), big.NewInt(2), big.NewInt(10)},
	{inf.NewDec(1, 1), big.NewInt(5), big.NewInt(10)},
	{inf.NewDec(1, 1), big.NewInt(8), big.NewInt(10)},
}
// decRounderResults pairs each Rounder with its expected output for every
// entry of decRounderInputs (positionally aligned); nil means the Rounder
// reports no exact result for that input.
var decRounderResults = [...]struct {
	rounder inf.Rounder
	results [len(decRounderInputs)]*inf.Dec
}{
	{inf.RoundExact, [...]*inf.Dec{nil, nil, nil, nil,
		nil, nil, nil, nil, nil, nil,
		inf.NewDec(0, 1), nil, nil, nil, nil, nil, nil}},
	{inf.RoundDown, [...]*inf.Dec{
		inf.NewDec(1, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(1, 0),
		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(0, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}},
	{inf.RoundUp, [...]*inf.Dec{
		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1),
		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
		inf.NewDec(0, 1),
		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
		inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
	{inf.RoundHalfDown, [...]*inf.Dec{
		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
		inf.NewDec(-2, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
		inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(0, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1),
		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(2, 1)}},
	{inf.RoundHalfUp, [...]*inf.Dec{
		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1),
		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(0, 1),
		inf.NewDec(0, 1),
		inf.NewDec(0, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
		inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
	{inf.RoundHalfEven, [...]*inf.Dec{
		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1),
		inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(0, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1),
		inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
	{inf.RoundFloor, [...]*inf.Dec{
		inf.NewDec(1, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(1, 0),
		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1),
		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
		inf.NewDec(0, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}},
	{inf.RoundCeil, [...]*inf.Dec{
		inf.NewDec(2, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(2, 0),
		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
		inf.NewDec(0, 1),
		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
		inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
}
// TestDecRounders runs every Rounder over the shared quotient/remainder
// inputs (copied so rounding cannot mutate the tables) and compares against
// the expected results, treating nil as "no exact result".
func TestDecRounders(t *testing.T) {
	for i, tc := range decRounderResults {
		for j, in := range decRounderInputs {
			quo := new(inf.Dec).Set(in.quo)
			remA := new(big.Int).Set(in.rA)
			remB := new(big.Int).Set(in.rB)

			got := tc.rounder.Round(new(inf.Dec), quo, remA, remB)
			want := tc.results[j]
			switch {
			case want == nil && got == nil:
				// both agree that no result exists
			case want == nil, got == nil, want.Cmp(got) != 0:
				t.Errorf("#%d,%d Rounder got %v; expected %v", i, j, got, want)
			}
		}
	}
}

24
vendor/gopkg.in/warnings.v0/LICENSE generated vendored
View file

@ -1,24 +0,0 @@
Copyright (c) 2016 Péter Surányi.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

74
vendor/gopkg.in/warnings.v0/README generated vendored
View file

@ -1,74 +0,0 @@
Package warnings implements error handling with non-fatal errors (warnings).
import path: "gopkg.in/warnings.v0"
package docs: https://godoc.org/gopkg.in/warnings.v0
issues: https://github.com/go-warnings/warnings/issues
pull requests: https://github.com/go-warnings/warnings/pulls
A recurring pattern in Go programming is the following:
func myfunc(params) error {
if err := doSomething(...); err != nil {
return err
}
if err := doSomethingElse(...); err != nil {
return err
}
if ok := doAnotherThing(...); !ok {
return errors.New("my error")
}
...
return nil
}
This pattern allows interrupting the flow on any received error. But what if
there are errors that should be noted but still not fatal, for which the flow
should not be interrupted? Implementing such logic at each if statement would
make the code complex and the flow much harder to follow.
Package warnings provides the Collector type and a clean and simple pattern
for achieving such logic. The Collector takes care of deciding when to break
the flow and when to continue, collecting any non-fatal errors (warnings)
along the way. The only requirement is that fatal and non-fatal errors can be
distinguished programmatically; that is a function such as
IsFatal(error) bool
must be implemented. The following is an example of what the above snippet
could look like using the warnings package:
import "gopkg.in/warnings.v0"
func isFatal(err error) bool {
_, ok := err.(WarningType)
return !ok
}
func myfunc(params) error {
c := warnings.NewCollector(isFatal)
c.FatalWithWarnings = true
if err := c.Collect(doSomething()); err != nil {
return err
}
if err := c.Collect(doSomethingElse(...)); err != nil {
return err
}
if ok := doAnotherThing(...); !ok {
if err := c.Collect(errors.New("my error")); err != nil {
return err
}
}
...
return c.Done()
}
Rules for using warnings
- ensure that warnings are programmatically distinguishable from fatal
errors (i.e. implement an isFatal function and any necessary error types)
- ensure that there is a single Collector instance for a call of each
exported function
- ensure that all errors (fatal or warning) are fed through Collect
- ensure that every time an error is returned, it is one returned by a
Collector (from Collect or Done)
- ensure that Collect is never called after Done

View file

@ -1,191 +0,0 @@
// Package warnings implements error handling with non-fatal errors (warnings).
//
// A recurring pattern in Go programming is the following:
//
// func myfunc(params) error {
// if err := doSomething(...); err != nil {
// return err
// }
// if err := doSomethingElse(...); err != nil {
// return err
// }
// if ok := doAnotherThing(...); !ok {
// return errors.New("my error")
// }
// ...
// return nil
// }
//
// This pattern allows interrupting the flow on any received error. But what if
// there are errors that should be noted but still not fatal, for which the flow
// should not be interrupted? Implementing such logic at each if statement would
// make the code complex and the flow much harder to follow.
//
// Package warnings provides the Collector type and a clean and simple pattern
// for achieving such logic. The Collector takes care of deciding when to break
// the flow and when to continue, collecting any non-fatal errors (warnings)
// along the way. The only requirement is that fatal and non-fatal errors can be
// distinguished programmatically; that is, a function such as
//
// IsFatal(error) bool
//
// must be implemented. The following is an example of what the above snippet
// could look like using the warnings package:
//
// import "gopkg.in/warnings.v0"
//
// func isFatal(err error) bool {
// _, ok := err.(WarningType)
// return !ok
// }
//
// func myfunc(params) error {
// c := warnings.NewCollector(isFatal)
// c.FatalWithWarnings = true
// if err := c.Collect(doSomething()); err != nil {
// return err
// }
// if err := c.Collect(doSomethingElse(...)); err != nil {
// return err
// }
// if ok := doAnotherThing(...); !ok {
// if err := c.Collect(errors.New("my error")); err != nil {
// return err
// }
// }
// ...
// return c.Done()
// }
//
// Rules for using warnings
//
// - ensure that warnings are programmatically distinguishable from fatal
// errors (i.e. implement an isFatal function and any necessary error types)
// - ensure that there is a single Collector instance for a call of each
// exported function
// - ensure that all errors (fatal or warning) are fed through Collect
// - ensure that every time an error is returned, it is one returned by a
// Collector (from Collect or Done)
// - ensure that Collect is never called after Done
//
// TODO
//
// - optionally limit the number of warnings (e.g. stop after 20 warnings) (?)
// - consider interaction with contexts
// - go vet-style invocations verifier
// - semi-automatic code converter
//
package warnings
import (
"bytes"
"fmt"
)
// List holds a collection of warnings and optionally one fatal error.
// A List is the error value produced by a Collector when warnings were
// collected (see Collector.Collect and Collector.Done).
type List struct {
	Warnings []error // non-fatal errors collected so far, in order of collection
	Fatal    error   // the fatal error that ended collection, or nil if none occurred
}
// Error implements the error interface. The rendered text lists the fatal
// error first (if any) under a "fatal:" heading, then each warning on its
// own line under a "warning:"/"warnings:" heading.
func (l List) Error() string {
	var sb bytes.Buffer
	if l.Fatal != nil {
		fmt.Fprintln(&sb, "fatal:")
		fmt.Fprintln(&sb, l.Fatal)
	}
	// Pick the heading that matches the warning count; none when empty.
	if n := len(l.Warnings); n == 1 {
		fmt.Fprintln(&sb, "warning:")
	} else if n > 1 {
		fmt.Fprintln(&sb, "warnings:")
	}
	for _, w := range l.Warnings {
		fmt.Fprintln(&sb, w)
	}
	return sb.String()
}
// A Collector collects errors up to the first fatal error.
// The zero value is not usable: IsFatal must be set (see NewCollector).
type Collector struct {
	// IsFatal distinguishes between warnings and fatal errors.
	// Collect calls it for every non-nil error it receives.
	IsFatal func(error) bool
	// FatalWithWarnings set to true means that a fatal error is returned as
	// a List together with all warnings so far. The default behavior is to
	// only return the fatal error and discard any warnings that have been
	// collected.
	FatalWithWarnings bool
	l    List // accumulated warnings and (at most one) fatal error
	done bool // set on the first fatal error or on Done; further Collect calls panic
}
// NewCollector returns a new Collector; it uses isFatal to distinguish between
// warnings and fatal errors.
func NewCollector(isFatal func(error) bool) *Collector {
	c := Collector{IsFatal: isFatal}
	return &c
}
// Collect collects a single error (warning or fatal). It returns nil if
// collection can continue (only warnings so far), or otherwise the errors
// collected. Collect mustn't be called after the first fatal error or after
// Done has been called.
func (c *Collector) Collect(err error) error {
	if c.done {
		panic("warnings.Collector already done")
	}
	if err == nil {
		return nil
	}
	if !c.IsFatal(err) {
		// A warning: remember it and let the caller continue.
		c.l.Warnings = append(c.l.Warnings, err)
		return nil
	}
	// A fatal error ends collection immediately; report what was gathered.
	c.done = true
	c.l.Fatal = err
	return c.erorr()
}
// Done ends collection and returns the collected error(s): nil if nothing
// was collected, otherwise the fatal error and/or warnings (see erorr).
// Collect must not be called after Done.
func (c *Collector) Done() error {
	c.done = true
	return c.erorr()
}
// erorr builds the value to hand back to callers: the bare fatal error when
// FatalWithWarnings is off, nil when nothing was collected, or the full List
// otherwise. (The misspelled name is kept as-is: the sibling methods Collect
// and Done call it by this name.)
func (c *Collector) erorr() error {
	switch {
	case c.l.Fatal != nil && !c.FatalWithWarnings:
		return c.l.Fatal
	case c.l.Fatal == nil && len(c.l.Warnings) == 0:
		return nil
	}
	// Note that a single warning is also returned as a List. This is to make it
	// easier to determine fatal-ness of the returned error.
	return c.l
}
// FatalOnly returns the fatal error, if any, **in an error returned by a
// Collector**. It returns nil if and only if err is nil or err is a List
// with err.Fatal == nil. Any non-List error is passed through unchanged.
func FatalOnly(err error) error {
	if l, ok := err.(List); ok {
		return l.Fatal
	}
	return err
}
// WarningsOnly returns the warnings **in an error returned by a Collector**.
// For nil or any non-List error it returns nil.
func WarningsOnly(err error) []error {
	if l, ok := err.(List); ok {
		return l.Warnings
	}
	return nil
}

9
vendor/gopkg.in/yaml.v2/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,9 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip
go_import_path: gopkg.in/yaml.v2

325
vendor/gopkg.in/yaml.v2/LICENSE generated vendored
View file

@ -1,188 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright (c) 2011-2014 - Canonical Inc.
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
This software is licensed under the LGPLv3, included below.
1. Definitions.
As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
0. Additional Definitions.
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
1. Exception to Section 3 of the GNU GPL.
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
2. Conveying Modified Versions.
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
3. Object Code Incorporating Material from Library Header Files.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
END OF TERMS AND CONDITIONS
4. Combined Works.
APPENDIX: How to apply the Apache License to your work.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
Copyright {yyyy} {name of copyright owner}
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
http://www.apache.org/licenses/LICENSE-2.0
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

4
vendor/gopkg.in/yaml.v2/README.md generated vendored
View file

@ -42,12 +42,14 @@ The package API for yaml v2 will remain stable as described in [gopkg.in](https:
License
-------
The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
Example
-------
Some more examples can be found in the "examples" folder.
```Go
package main

10
vendor/gopkg.in/yaml.v2/decode.go generated vendored
View file

@ -120,7 +120,6 @@ func (p *parser) parse() *node {
default:
panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
}
panic("unreachable")
}
func (p *parser) node(kind int) *node {
@ -191,6 +190,7 @@ type decoder struct {
aliases map[string]bool
mapType reflect.Type
terrors []string
strict bool
}
var (
@ -200,8 +200,8 @@ var (
ifaceType = defaultMapType.Elem()
)
func newDecoder() *decoder {
d := &decoder{mapType: defaultMapType}
func newDecoder(strict bool) *decoder {
d := &decoder{mapType: defaultMapType, strict: strict}
d.aliases = make(map[string]bool)
return d
}
@ -251,7 +251,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
@ -640,6 +640,8 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
inlineMap.SetMapIndex(name, value)
} else if d.strict {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", n.line+1, name.String(), out.Type()))
}
}
return true

1017
vendor/gopkg.in/yaml.v2/decode_test.go generated vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -666,7 +666,6 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
return yaml_emitter_set_emitter_error(emitter,
"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
}
return false
}
// Expect ALIAS.
@ -995,7 +994,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
break_space = false
space_break = false
preceeded_by_whitespace = false
preceded_by_whitespace = false
followed_by_whitespace = false
previous_space = false
previous_break = false
@ -1017,7 +1016,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
flow_indicators = true
}
preceeded_by_whitespace = true
preceded_by_whitespace = true
for i, w := 0, 0; i < len(value); i += w {
w = width(value[i])
followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
@ -1048,7 +1047,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
block_indicators = true
}
case '#':
if preceeded_by_whitespace {
if preceded_by_whitespace {
flow_indicators = true
block_indicators = true
}
@ -1089,7 +1088,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
}
// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
preceeded_by_whitespace = is_blankz(value, i)
preceded_by_whitespace = is_blankz(value, i)
}
emitter.scalar_data.multiline = line_breaks

501
vendor/gopkg.in/yaml.v2/encode_test.go generated vendored Normal file
View file

@ -0,0 +1,501 @@
package yaml_test
import (
"fmt"
"math"
"strconv"
"strings"
"time"
. "gopkg.in/check.v1"
"gopkg.in/yaml.v2"
"net"
"os"
)
// marshalIntTest exists so a test case can marshal a plain *int.
var marshalIntTest = 123

// marshalTests drives TestMarshal: each case pairs a Go value with the exact
// YAML document that yaml.Marshal is expected to produce for it.
var marshalTests = []struct {
	value interface{}
	data  string
}{
	{
		nil,
		"null\n",
	}, {
		&struct{}{},
		"{}\n",
	}, {
		map[string]string{"v": "hi"},
		"v: hi\n",
	}, {
		map[string]interface{}{"v": "hi"},
		"v: hi\n",
	}, {
		map[string]string{"v": "true"},
		"v: \"true\"\n",
	}, {
		map[string]string{"v": "false"},
		"v: \"false\"\n",
	}, {
		map[string]interface{}{"v": true},
		"v: true\n",
	}, {
		map[string]interface{}{"v": false},
		"v: false\n",
	}, {
		map[string]interface{}{"v": 10},
		"v: 10\n",
	}, {
		map[string]interface{}{"v": -10},
		"v: -10\n",
	}, {
		map[string]uint{"v": 42},
		"v: 42\n",
	}, {
		map[string]interface{}{"v": int64(4294967296)},
		"v: 4294967296\n",
	}, {
		map[string]int64{"v": int64(4294967296)},
		"v: 4294967296\n",
	}, {
		map[string]uint64{"v": 4294967296},
		"v: 4294967296\n",
	}, {
		map[string]interface{}{"v": "10"},
		"v: \"10\"\n",
	}, {
		map[string]interface{}{"v": 0.1},
		"v: 0.1\n",
	}, {
		map[string]interface{}{"v": float64(0.1)},
		"v: 0.1\n",
	}, {
		map[string]interface{}{"v": -0.1},
		"v: -0.1\n",
	}, {
		map[string]interface{}{"v": math.Inf(+1)},
		"v: .inf\n",
	}, {
		map[string]interface{}{"v": math.Inf(-1)},
		"v: -.inf\n",
	}, {
		map[string]interface{}{"v": math.NaN()},
		"v: .nan\n",
	}, {
		map[string]interface{}{"v": nil},
		"v: null\n",
	}, {
		map[string]interface{}{"v": ""},
		"v: \"\"\n",
	}, {
		map[string][]string{"v": []string{"A", "B"}},
		"v:\n- A\n- B\n",
	}, {
		map[string][]string{"v": []string{"A", "B\nC"}},
		"v:\n- A\n- |-\n B\n C\n",
	}, {
		map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
		"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
	}, {
		map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
		"a:\n b: c\n",
	}, {
		map[string]interface{}{"a": "-"},
		"a: '-'\n",
	},

	// Simple values.
	{
		&marshalIntTest,
		"123\n",
	},

	// Structures
	{
		&struct{ Hello string }{"world"},
		"hello: world\n",
	}, {
		&struct {
			A struct {
				B string
			}
		}{struct{ B string }{"c"}},
		"a:\n b: c\n",
	}, {
		&struct {
			A *struct {
				B string
			}
		}{&struct{ B string }{"c"}},
		"a:\n b: c\n",
	}, {
		&struct {
			A *struct {
				B string
			}
		}{},
		"a: null\n",
	}, {
		&struct{ A int }{1},
		"a: 1\n",
	}, {
		&struct{ A []int }{[]int{1, 2}},
		"a:\n- 1\n- 2\n",
	}, {
		&struct {
			B int "a"
		}{1},
		"a: 1\n",
	}, {
		&struct{ A bool }{true},
		"a: true\n",
	},

	// Conditional flag
	{
		&struct {
			A int "a,omitempty"
			B int "b,omitempty"
		}{1, 0},
		"a: 1\n",
	}, {
		&struct {
			A int "a,omitempty"
			B int "b,omitempty"
		}{0, 0},
		"{}\n",
	}, {
		&struct {
			A *struct{ X, y int } "a,omitempty,flow"
		}{&struct{ X, y int }{1, 2}},
		"a: {x: 1}\n",
	}, {
		&struct {
			A *struct{ X, y int } "a,omitempty,flow"
		}{nil},
		"{}\n",
	}, {
		&struct {
			A *struct{ X, y int } "a,omitempty,flow"
		}{&struct{ X, y int }{}},
		"a: {x: 0}\n",
	}, {
		&struct {
			A struct{ X, y int } "a,omitempty,flow"
		}{struct{ X, y int }{1, 2}},
		"a: {x: 1}\n",
	}, {
		&struct {
			A struct{ X, y int } "a,omitempty,flow"
		}{struct{ X, y int }{0, 1}},
		"{}\n",
	}, {
		&struct {
			A float64 "a,omitempty"
			B float64 "b,omitempty"
		}{1, 0},
		"a: 1\n",
	},

	// Flow flag
	{
		&struct {
			A []int "a,flow"
		}{[]int{1, 2}},
		"a: [1, 2]\n",
	}, {
		&struct {
			A map[string]string "a,flow"
		}{map[string]string{"b": "c", "d": "e"}},
		"a: {b: c, d: e}\n",
	}, {
		&struct {
			A struct {
				B, D string
			} "a,flow"
		}{struct{ B, D string }{"c", "e"}},
		"a: {b: c, d: e}\n",
	},

	// Unexported field
	{
		&struct {
			u int
			A int
		}{0, 1},
		"a: 1\n",
	},

	// Ignored field
	{
		&struct {
			A int
			B int "-"
		}{1, 2},
		"a: 1\n",
	},

	// Struct inlining
	{
		&struct {
			A int
			C inlineB `yaml:",inline"`
		}{1, inlineB{2, inlineC{3}}},
		"a: 1\nb: 2\nc: 3\n",
	},

	// Map inlining
	{
		&struct {
			A int
			C map[string]int `yaml:",inline"`
		}{1, map[string]int{"b": 2, "c": 3}},
		"a: 1\nb: 2\nc: 3\n",
	},

	// Duration
	{
		map[string]time.Duration{"a": 3 * time.Second},
		"a: 3s\n",
	},

	// Issue #24: bug in map merging logic.
	{
		map[string]string{"a": "<foo>"},
		"a: <foo>\n",
	},

	// Issue #34: marshal unsupported base 60 floats quoted for compatibility
	// with old YAML 1.1 parsers.
	{
		map[string]string{"a": "1:1"},
		"a: \"1:1\"\n",
	},

	// Binary data.
	{
		map[string]string{"a": "\x00"},
		"a: \"\\0\"\n",
	}, {
		map[string]string{"a": "\x80\x81\x82"},
		"a: !!binary gIGC\n",
	}, {
		map[string]string{"a": strings.Repeat("\x90", 54)},
		"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
	},

	// Ordered maps.
	{
		&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
		"b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
	},

	// Encode unicode as utf-8 rather than in escaped form.
	{
		map[string]string{"a": "你好"},
		"a: 你好\n",
	},

	// Support encoding.TextMarshaler.
	{
		map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
		"a: 1.2.3.4\n",
	},
	{
		map[string]time.Time{"a": time.Unix(1424801979, 0)},
		"a: 2015-02-24T18:19:39Z\n",
	},

	// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
	{
		map[string]string{"a": "b: c"},
		"a: 'b: c'\n",
	},

	// Containing hash mark ('#') in string should be quoted
	{
		map[string]string{"a": "Hello #comment"},
		"a: 'Hello #comment'\n",
	},
	{
		map[string]string{"a": "你好 #comment"},
		"a: '你好 #comment'\n",
	},
}
// TestMarshal runs every entry in marshalTests through yaml.Marshal and
// verifies the produced document matches the expected text byte-for-byte.
// The TZ environment variable is pinned to UTC so time.Time values encode
// deterministically, and the previous value is restored on return.
func (s *S) TestMarshal(c *C) {
	originalTZ := os.Getenv("TZ")
	defer os.Setenv("TZ", originalTZ)
	os.Setenv("TZ", "UTC")
	for _, test := range marshalTests {
		out, err := yaml.Marshal(test.value)
		c.Assert(err, IsNil)
		c.Assert(string(out), Equals, test.data)
	}
}
// marshalErrorTests lists inputs that yaml.Marshal must reject, either by
// returning an error matching the `error` regexp or by panicking with a
// message matching the `panic` regexp (each entry sets exactly one of the two).
var marshalErrorTests = []struct {
	value interface{}
	error string
	panic string
}{{
	// Inlining a struct whose lowercased field name collides with a sibling
	// field ('b') must panic.
	value: &struct {
		B int
		inlineB ",inline"
	}{1, inlineB{2, inlineC{3}}},
	panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
}, {
	// An inlined map must not carry a key already claimed by a struct field.
	value: &struct {
		A int
		B map[string]int ",inline"
	}{1, map[string]int{"a": 2}},
	panic: `Can't have key "a" in inlined map; conflicts with struct field`,
}}
// TestMarshalErrors feeds each marshalErrorTests entry to yaml.Marshal and
// checks that it fails the expected way: a panic matching the panic pattern
// when one is given, otherwise an error matching the error pattern.
func (s *S) TestMarshalErrors(c *C) {
	for _, test := range marshalErrorTests {
		if test.panic == "" {
			_, err := yaml.Marshal(test.value)
			c.Assert(err, ErrorMatches, test.error)
			continue
		}
		c.Assert(func() { yaml.Marshal(test.value) }, PanicMatches, test.panic)
	}
}
// TestMarshalTypeCache guards against the encoder's per-type metadata cache
// confusing two distinct local types that share the name T. The second
// marshal must reflect the second T's field set ("b"), not reuse cached
// information gathered for the first T ("a").
func (s *S) TestMarshalTypeCache(c *C) {
	var (
		out []byte
		err error
	)
	marshalFirst := func() {
		type T struct{ A int }
		out, err = yaml.Marshal(&T{})
		c.Assert(err, IsNil)
	}
	marshalSecond := func() {
		type T struct{ B int }
		out, err = yaml.Marshal(&T{})
		c.Assert(err, IsNil)
	}
	marshalFirst()
	marshalSecond()
	c.Assert(string(out), Equals, "b: 0\n")
}
// marshalerTests pairs the value a MarshalYAML implementation returns with
// the document expected when that value is emitted under the "_" key
// (marshalerValue supplies the key via its field tag).
var marshalerTests = []struct {
	data  string
	value interface{}
}{
	{"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
	{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
	{"_: 10\n", 10},
	{"_: null\n", nil},
	{"_: BAR!\n", "BAR!"},
}
// marshalerType implements both MarshalText and MarshalYAML; the tests use
// it to confirm that the yaml package prefers MarshalYAML when both exist.
type marshalerType struct {
	// value is returned verbatim by MarshalYAML.
	value interface{}
}
// MarshalText implements encoding.TextMarshaler. It must never be invoked:
// when a type also provides MarshalYAML, the yaml encoder is expected to use
// that instead, so reaching this method is a test failure (hence the panic).
func (o marshalerType) MarshalText() ([]byte, error) {
	panic("MarshalText called on type with MarshalYAML")
}
// MarshalYAML returns the wrapped value unchanged, letting each test inject
// an arbitrary value to be encoded in its place.
func (o marshalerType) MarshalYAML() (interface{}, error) {
	return o.value, nil
}
// marshalerValue wraps a marshalerType field under the YAML key "_" so the
// marshaler's output always appears in a fixed, predictable position.
type marshalerValue struct {
	Field marshalerType "_"
}
// TestMarshaler verifies that a field whose type implements yaml.Marshaler
// is serialized from the value returned by MarshalYAML. Each test value is
// wrapped in a marshalerValue so it appears under the "_" key.
func (s *S) TestMarshaler(c *C) {
	for _, item := range marshalerTests {
		obj := &marshalerValue{}
		obj.Field.value = item.value
		data, err := yaml.Marshal(obj)
		c.Assert(err, IsNil)
		// item.data is already a string; the previous string(item.data)
		// conversion was a no-op and has been dropped.
		c.Assert(string(data), Equals, item.data)
	}
}
// TestMarshalerWholeDocument checks that MarshalYAML also works when the
// marshaler is the document root itself rather than a nested field.
func (s *S) TestMarshalerWholeDocument(c *C) {
	root := &marshalerType{value: map[string]string{"hello": "world!"}}
	out, err := yaml.Marshal(root)
	c.Assert(err, IsNil)
	c.Assert(string(out), Equals, "hello: world!\n")
}
// failingMarshaler always fails to marshal; it exists to check that the
// error returned from MarshalYAML propagates unchanged to the caller.
type failingMarshaler struct{}
// MarshalYAML unconditionally returns failingErr (a sentinel error declared
// elsewhere in this test file).
func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
	return nil, failingErr
}
// TestMarshalerError ensures that the exact error value produced by a
// failing MarshalYAML implementation is what yaml.Marshal returns.
func (s *S) TestMarshalerError(c *C) {
	var fm failingMarshaler
	_, err := yaml.Marshal(&fm)
	c.Assert(err, Equals, failingErr)
}
// TestSortedOutput checks the deterministic key ordering yaml.Marshal applies
// to map keys: booleans, then numbers, then strings, with strings that embed
// digit runs ordered numerically (e.g. "a/2" before "a/10").
func (s *S) TestSortedOutput(c *C) {
	// order lists the keys in exactly the sequence the encoder should emit.
	order := []interface{}{
		false,
		true,
		1,
		uint(1),
		1.0,
		1.1,
		1.2,
		2,
		uint(2),
		2.0,
		2.1,
		"",
		".1",
		".2",
		".a",
		"1",
		"2",
		"a!10",
		"a/2",
		"a/10",
		"a~10",
		"ab/1",
		"b/1",
		"b/01",
		"b/2",
		"b/02",
		"b/3",
		"b/03",
		"b1",
		"b01",
		"b3",
		"c2.10",
		"c10.2",
		"d1",
		"d12",
		"d12a",
	}
	// Insert every key into a single map. Go map iteration order is random,
	// so any ordering observed in the output must come from the encoder.
	m := make(map[interface{}]int)
	for _, k := range order {
		m[k] = 1
	}
	data, err := yaml.Marshal(m)
	c.Assert(err, IsNil)
	// Prefix with "\n" so each key can be located uniformly as "\n<key>:".
	out := "\n" + string(data)
	last := 0
	for i, k := range order {
		repr := fmt.Sprint(k)
		if s, ok := k.(string); ok {
			// String keys that are empty or parse as numbers are expected
			// to be emitted quoted; adjust the search pattern accordingly.
			// Note err is deliberately reassigned (not shadowed) here.
			if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
				repr = `"` + repr + `"`
			}
		}
		index := strings.Index(out, "\n"+repr+":")
		if index == -1 {
			c.Fatalf("%#v is not in the output: %#v", k, out)
		}
		// Each key must appear no earlier than the previous key's position.
		if index < last {
			c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
		}
		last = index
	}
}

41
vendor/gopkg.in/yaml.v2/example_embedded_test.go generated vendored Normal file
View file

@ -0,0 +1,41 @@
package yaml_test
import (
"fmt"
"log"
"gopkg.in/yaml.v2"
)
// An example showing how to unmarshal embedded
// structs from YAML.
// StructA holds the field that is promoted into StructB's YAML mapping
// when StructA is embedded with the ",inline" tag.
type StructA struct {
	A string `yaml:"a"`
}
// StructB inlines StructA so that fields from both structs decode from a
// single flat YAML mapping.
type StructB struct {
	// Embedded structs are not treated as embedded in YAML by default. To do that,
	// add the ",inline" annotation below
	StructA `yaml:",inline"`
	B string `yaml:"b"`
}
// data is the YAML document the example decodes: one key contributed by the
// inlined StructA and one by StructB itself.
var data = `
a: a string from struct A
b: a string from struct B
`
// ExampleUnmarshal_embedded demonstrates decoding a YAML mapping into a
// struct that exposes an embedded struct's fields via the ",inline" tag.
func ExampleUnmarshal_embedded() {
	var b StructB

	err := yaml.Unmarshal([]byte(data), &b)
	if err != nil {
		// Fatalf, not Fatal: the message contains a %v formatting verb.
		// log.Fatal would print the verb literally instead of the error.
		log.Fatalf("cannot unmarshal data: %v", err)
	}
	fmt.Println(b.A)
	fmt.Println(b.B)
	// Output:
	// a string from struct A
	// a string from struct B
}

1
vendor/gopkg.in/yaml.v2/parserc.go generated vendored
View file

@ -166,7 +166,6 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool
default:
panic("invalid parser state")
}
return false
}
// Parse the production:

7
vendor/gopkg.in/yaml.v2/readerc.go generated vendored
View file

@ -247,7 +247,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
low, high = 1, 0
}
// The UTF-16 encoding is not as simple as one might
@ -357,23 +357,26 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
buffer_len += 1
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
buffer_len += 2
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
buffer_len += 3
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
buffer_len += 4
}
buffer_len += width
parser.unread++
}

11
vendor/gopkg.in/yaml.v2/resolve.go generated vendored
View file

@ -3,6 +3,7 @@ package yaml
import (
"encoding/base64"
"math"
"regexp"
"strconv"
"strings"
"unicode/utf8"
@ -80,6 +81,8 @@ func resolvableTag(tag string) bool {
return false
}
var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
return tag, in
@ -135,9 +138,11 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
if err == nil {
return yaml_INT_TAG, uintv
}
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
if yamlStyleFloat.MatchString(plain) {
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)

13
vendor/gopkg.in/yaml.v2/scannerc.go generated vendored
View file

@ -9,7 +9,7 @@ import (
// ************
//
// The following notes assume that you are familiar with the YAML specification
// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
// some cases we are less restrictive than it requires.
//
// The process of transforming a YAML stream into a sequence of events is
@ -611,7 +611,7 @@ func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, co
if directive {
context = "while parsing a %TAG directive"
}
return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
}
func trace(args ...interface{}) func() {
@ -1546,7 +1546,7 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool
// Unknown directive.
} else {
yaml_parser_set_scanner_error(parser, "while scanning a directive",
start_mark, "found uknown directive name")
start_mark, "found unknown directive name")
return false
}
@ -1944,7 +1944,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
} else {
// It's either the '!' tag or not really a tag handle. If it's a %TAG
// directive, it's an error. If it's a tag token, it must be a part of URI.
if directive && !(s[0] == '!' && s[1] == 0) {
if directive && string(s) != "!" {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected '!'")
return false
@ -1959,6 +1959,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
//size_t length = head ? strlen((char *)head) : 0
var s []byte
hasTag := len(head) > 0
// Copy the head if needed.
//
@ -2000,10 +2001,10 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
hasTag = true
}
// Check if the tag is non-empty.
if len(s) == 0 {
if !hasTag {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected tag URI")
return false

12
vendor/gopkg.in/yaml.v2/suite_test.go generated vendored Normal file
View file

@ -0,0 +1,12 @@
package yaml_test
import (
. "gopkg.in/check.v1"
"testing"
)
// Test hooks the gocheck suite runner into the standard "go test" machinery.
func Test(t *testing.T) { TestingT(t) }

// S is the suite type the package's gocheck test methods hang off of.
type S struct{}

// Register the suite with the gocheck runner.
var _ = Suite(&S{})

15
vendor/gopkg.in/yaml.v2/yaml.go generated vendored
View file

@ -77,8 +77,19 @@ type Marshaler interface {
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
return unmarshal(in, out, false)
}
// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members will result in
// an error.
func UnmarshalStrict(in []byte, out interface{}) (err error) {
return unmarshal(in, out, true)
}
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
d := newDecoder()
d := newDecoder(strict)
p := newParser(in)
defer p.destroy()
node := p.parse()
@ -222,7 +233,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" {
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}

2
vendor/gopkg.in/yaml.v2/yamlh.go generated vendored
View file

@ -508,7 +508,7 @@ type yaml_parser_t struct {
problem string // Error description.
// The byte about which the problem occured.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t