Add e2e tests
This commit is contained in:
parent
99a355f25d
commit
601fb7dacf
1163 changed files with 289217 additions and 14195 deletions
55
vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
generated
vendored
Normal file
55
vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
package spec_iterator
|
||||
|
||||
// ParallelizedIndexRange computes the contiguous slice of the spec list a given
// parallel node is responsible for. It returns the zero-based index of the first
// spec the node should run together with how many specs it should run.
//
// Nodes are numbered starting at 1. When there are at least as many nodes as
// specs, each of the first `length` nodes gets exactly one spec and any surplus
// nodes get none. Otherwise specs are split as evenly as possible, with the
// first (length mod parallelTotal) nodes carrying one extra spec each.
func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
	// Nothing to distribute.
	if length == 0 {
		return 0, 0
	}

	// More nodes than specs: node k (1-based) runs spec k-1; surplus nodes run nothing.
	if parallelTotal >= length {
		if parallelNode > length {
			return 0, 0
		}
		return parallelNode - 1, 1
	}

	// Every node runs at least floor(length/parallelTotal) specs; if the split
	// is uneven, the first (length mod parallelTotal) "max-load" nodes run one more.
	minTestsPerNode := length / parallelTotal
	numMaxLoadNodes := length % parallelTotal
	maxTestsPerNode := minTestsPerNode
	if numMaxLoadNodes != 0 {
		maxTestsPerNode++
	}

	// Count how many nodes ahead of this one carry the heavy load and how many
	// carry the light load; the sum of their spans is this node's start index.
	numPrecedingMaxLoadNodes := numMaxLoadNodes
	numPrecedingMinLoadNodes := parallelNode - numMaxLoadNodes - 1
	if parallelNode <= numMaxLoadNodes {
		numPrecedingMaxLoadNodes = parallelNode - 1
		numPrecedingMinLoadNodes = 0
	}

	startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
	count = maxTestsPerNode
	if parallelNode > numMaxLoadNodes {
		count = minTestsPerNode
	}
	return startIndex, count
}
|
||||
149
vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go
generated
vendored
Normal file
149
vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,149 @@
|
|||
package spec_iterator_test
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/ginkgo/internal/spec_iterator"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Specs for ParallelizedIndexRange, exhaustively covering under-subscribed
// (more specs than nodes), exactly-subscribed, and over-subscribed (more nodes
// than specs) configurations. Start indices are zero-based; nodes are one-based.
var _ = Describe("ParallelizedIndexRange", func() {
	var startIndex, count int

	It("should return the correct index range for 4 tests on 2 nodes", func() {
		startIndex, count = ParallelizedIndexRange(4, 2, 1)
		Ω(startIndex).Should(Equal(0))
		Ω(count).Should(Equal(2))

		startIndex, count = ParallelizedIndexRange(4, 2, 2)
		Ω(startIndex).Should(Equal(2))
		Ω(count).Should(Equal(2))
	})

	It("should return the correct index range for 5 tests on 2 nodes", func() {
		// Uneven split: node 1 carries the extra spec.
		startIndex, count = ParallelizedIndexRange(5, 2, 1)
		Ω(startIndex).Should(Equal(0))
		Ω(count).Should(Equal(3))

		startIndex, count = ParallelizedIndexRange(5, 2, 2)
		Ω(startIndex).Should(Equal(3))
		Ω(count).Should(Equal(2))
	})

	It("should return the correct index range for 5 tests on 3 nodes", func() {
		startIndex, count = ParallelizedIndexRange(5, 3, 1)
		Ω(startIndex).Should(Equal(0))
		Ω(count).Should(Equal(2))

		startIndex, count = ParallelizedIndexRange(5, 3, 2)
		Ω(startIndex).Should(Equal(2))
		Ω(count).Should(Equal(2))

		startIndex, count = ParallelizedIndexRange(5, 3, 3)
		Ω(startIndex).Should(Equal(4))
		Ω(count).Should(Equal(1))
	})

	It("should return the correct index range for 5 tests on 4 nodes", func() {
		startIndex, count = ParallelizedIndexRange(5, 4, 1)
		Ω(startIndex).Should(Equal(0))
		Ω(count).Should(Equal(2))

		startIndex, count = ParallelizedIndexRange(5, 4, 2)
		Ω(startIndex).Should(Equal(2))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 4, 3)
		Ω(startIndex).Should(Equal(3))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 4, 4)
		Ω(startIndex).Should(Equal(4))
		Ω(count).Should(Equal(1))
	})

	It("should return the correct index range for 5 tests on 5 nodes", func() {
		// Exactly subscribed: one spec per node.
		startIndex, count = ParallelizedIndexRange(5, 5, 1)
		Ω(startIndex).Should(Equal(0))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 5, 2)
		Ω(startIndex).Should(Equal(1))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 5, 3)
		Ω(startIndex).Should(Equal(2))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 5, 4)
		Ω(startIndex).Should(Equal(3))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 5, 5)
		Ω(startIndex).Should(Equal(4))
		Ω(count).Should(Equal(1))
	})

	It("should return the correct index range for 5 tests on 6 nodes", func() {
		// Over-subscribed: the surplus node (6) runs nothing.
		startIndex, count = ParallelizedIndexRange(5, 6, 1)
		Ω(startIndex).Should(Equal(0))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 6, 2)
		Ω(startIndex).Should(Equal(1))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 6, 3)
		Ω(startIndex).Should(Equal(2))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 6, 4)
		Ω(startIndex).Should(Equal(3))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 6, 5)
		Ω(startIndex).Should(Equal(4))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(5, 6, 6)
		Ω(count).Should(Equal(0))
	})

	It("should return the correct index range for 5 tests on 7 nodes", func() {
		// Only the surplus nodes are checked here; both must receive no specs.
		startIndex, count = ParallelizedIndexRange(5, 7, 6)
		Ω(count).Should(Equal(0))

		startIndex, count = ParallelizedIndexRange(5, 7, 7)
		Ω(count).Should(Equal(0))
	})

	It("should return the correct index range for 11 tests on 7 nodes", func() {
		// 11 = 7*1 + 4, so nodes 1-4 run two specs and nodes 5-7 run one.
		startIndex, count = ParallelizedIndexRange(11, 7, 1)
		Ω(startIndex).Should(Equal(0))
		Ω(count).Should(Equal(2))

		startIndex, count = ParallelizedIndexRange(11, 7, 2)
		Ω(startIndex).Should(Equal(2))
		Ω(count).Should(Equal(2))

		startIndex, count = ParallelizedIndexRange(11, 7, 3)
		Ω(startIndex).Should(Equal(4))
		Ω(count).Should(Equal(2))

		startIndex, count = ParallelizedIndexRange(11, 7, 4)
		Ω(startIndex).Should(Equal(6))
		Ω(count).Should(Equal(2))

		startIndex, count = ParallelizedIndexRange(11, 7, 5)
		Ω(startIndex).Should(Equal(8))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(11, 7, 6)
		Ω(startIndex).Should(Equal(9))
		Ω(count).Should(Equal(1))

		startIndex, count = ParallelizedIndexRange(11, 7, 7)
		Ω(startIndex).Should(Equal(10))
		Ω(count).Should(Equal(1))
	})

})
|
||||
60
vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
generated
vendored
Normal file
60
vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
generated
vendored
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
package spec_iterator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
)
|
||||
|
||||
// ParallelIterator hands out specs one at a time by querying a shared HTTP
// counter endpoint, so multiple ginkgo nodes can pull from the same ordered
// spec list without any spec running twice.
type ParallelIterator struct {
	specs  []*spec.Spec // full ordered spec list, shared by all nodes
	host   string       // base URL of the ginkgo server that owns the counter
	client *http.Client // reused for every /counter request
}
|
||||
|
||||
// NewParallelIterator returns a ParallelIterator that fetches spec indices from
// the ginkgo server at host (e.g. "http://127.0.0.1:1234").
func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
	return &ParallelIterator{
		specs:  specs,
		host:   host,
		client: &http.Client{},
	}
}
|
||||
|
||||
func (s *ParallelIterator) Next() (*spec.Spec, error) {
|
||||
resp, err := s.client.Get(s.host + "/counter")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, errors.New(fmt.Sprintf("unexpected status code %d", resp.StatusCode))
|
||||
}
|
||||
|
||||
var counter Counter
|
||||
err = json.NewDecoder(resp.Body).Decode(&counter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if counter.Index >= len(s.specs) {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
|
||||
return s.specs[counter.Index], nil
|
||||
}
|
||||
|
||||
// NumberOfSpecsPriorToIteration reports the total size of the shared spec list,
// before any iteration or filtering happens.
func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}
|
||||
|
||||
// NumberOfSpecsToProcessIfKnown returns (-1, false): in parallel mode the
// number of specs this node will receive depends on the shared server counter
// and cannot be known up front.
func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return -1, false
}
|
||||
|
||||
// NumberOfSpecsThatWillBeRunIfKnown returns (-1, false) for the same reason as
// NumberOfSpecsToProcessIfKnown: the assignment is decided remotely at runtime.
func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	return -1, false
}
|
||||
112
vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go
generated
vendored
Normal file
112
vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,112 @@
|
|||
package spec_iterator_test
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
. "github.com/onsi/ginkgo/internal/spec_iterator"
|
||||
"github.com/onsi/gomega/ghttp"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/codelocation"
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Specs for ParallelIterator, using a ghttp test server to play the role of the
// ginkgo server's /counter endpoint. The fixture list is: A (pending), B, C,
// and D (explicitly skipped).
var _ = Describe("ParallelSpecIterator", func() {
	var specs []*spec.Spec
	var iterator *ParallelIterator
	var server *ghttp.Server

	// newSpec builds a minimal one-node spec with the given text and flag.
	newSpec := func(text string, flag types.FlagType) *spec.Spec {
		subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
		return spec.New(subject, []*containernode.ContainerNode{}, false)
	}

	BeforeEach(func() {
		specs = []*spec.Spec{
			newSpec("A", types.FlagTypePending),
			newSpec("B", types.FlagTypeNone),
			newSpec("C", types.FlagTypeNone),
			newSpec("D", types.FlagTypeNone),
		}
		specs[3].Skip()

		server = ghttp.NewServer()

		iterator = NewParallelIterator(specs, "http://"+server.Addr())
	})

	AfterEach(func() {
		server.Close()
	})

	It("should report the total number of specs", func() {
		Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4))
	})

	It("should not report the number to be processed", func() {
		n, known := iterator.NumberOfSpecsToProcessIfKnown()
		Ω(n).Should(Equal(-1))
		Ω(known).Should(BeFalse())
	})

	It("should not report the number that will be run", func() {
		n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown()
		Ω(n).Should(Equal(-1))
		Ω(known).Should(BeFalse())
	})

	Describe("iterating", func() {
		Describe("when the server returns well-formed responses", func() {
			BeforeEach(func() {
				// Counter skips index 2 (as if another node claimed it) and
				// finally returns 4, which is past the end of the list.
				server.AppendHandlers(
					ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{0}),
					ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{1}),
					ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{3}),
					ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{4}),
				)
			})

			It("should return the specs in question", func() {
				Ω(iterator.Next()).Should(Equal(specs[0]))
				Ω(iterator.Next()).Should(Equal(specs[1]))
				Ω(iterator.Next()).Should(Equal(specs[3]))
				spec, err := iterator.Next()
				Ω(spec).Should(BeNil())
				Ω(err).Should(MatchError(ErrClosed))
			})
		})

		Describe("when the server 404s", func() {
			BeforeEach(func() {
				server.AppendHandlers(
					ghttp.RespondWith(http.StatusNotFound, ""),
				)
			})

			It("should return an error", func() {
				spec, err := iterator.Next()
				Ω(spec).Should(BeNil())
				Ω(err).Should(MatchError("unexpected status code 404"))
			})
		})

		Describe("when the server returns gibberish", func() {
			BeforeEach(func() {
				// Body is not valid JSON, so decoding must fail.
				server.AppendHandlers(
					ghttp.RespondWith(http.StatusOK, "ß"),
				)
			})

			It("should error", func() {
				spec, err := iterator.Next()
				Ω(spec).Should(BeNil())
				Ω(err).ShouldNot(BeNil())
			})
		})
	})
})
|
||||
45
vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
generated
vendored
Normal file
45
vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
generated
vendored
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
package spec_iterator
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
)
|
||||
|
||||
// SerialIterator walks a spec list front to back on a single node.
type SerialIterator struct {
	specs []*spec.Spec // specs to run, in order
	index int          // position of the next spec to hand out
}
|
||||
|
||||
// NewSerialIterator returns a SerialIterator positioned at the start of specs.
func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
	return &SerialIterator{
		specs: specs,
		index: 0,
	}
}
|
||||
|
||||
func (s *SerialIterator) Next() (*spec.Spec, error) {
|
||||
if s.index >= len(s.specs) {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
|
||||
spec := s.specs[s.index]
|
||||
s.index += 1
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
// NumberOfSpecsPriorToIteration reports the total size of the spec list.
func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}
|
||||
|
||||
// NumberOfSpecsToProcessIfKnown is always known for serial runs: this node
// processes every spec in the list.
func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return len(s.specs), true
}
|
||||
|
||||
func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
|
||||
count := 0
|
||||
for _, s := range s.specs {
|
||||
if !s.Skipped() && !s.Pending() {
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
return count, true
|
||||
}
|
||||
64
vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go
generated
vendored
Normal file
64
vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
package spec_iterator_test
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo/internal/spec_iterator"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/codelocation"
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Specs for SerialIterator. Fixture list: A (pending), B, C, and D (skipped) —
// so 4 specs total, of which only B and C will actually run.
var _ = Describe("SerialSpecIterator", func() {
	var specs []*spec.Spec
	var iterator *SerialIterator

	// newSpec builds a minimal one-node spec with the given text and flag.
	newSpec := func(text string, flag types.FlagType) *spec.Spec {
		subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
		return spec.New(subject, []*containernode.ContainerNode{}, false)
	}

	BeforeEach(func() {
		specs = []*spec.Spec{
			newSpec("A", types.FlagTypePending),
			newSpec("B", types.FlagTypeNone),
			newSpec("C", types.FlagTypeNone),
			newSpec("D", types.FlagTypeNone),
		}
		specs[3].Skip()

		iterator = NewSerialIterator(specs)
	})

	It("should report the total number of specs", func() {
		Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4))
	})

	It("should report the number to be processed", func() {
		n, known := iterator.NumberOfSpecsToProcessIfKnown()
		Ω(n).Should(Equal(4))
		Ω(known).Should(BeTrue())
	})

	It("should report the number that will be run", func() {
		// Only B and C: A is pending and D is skipped.
		n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown()
		Ω(n).Should(Equal(2))
		Ω(known).Should(BeTrue())
	})

	Describe("iterating", func() {
		It("should return the specs in order", func() {
			Ω(iterator.Next()).Should(Equal(specs[0]))
			Ω(iterator.Next()).Should(Equal(specs[1]))
			Ω(iterator.Next()).Should(Equal(specs[2]))
			Ω(iterator.Next()).Should(Equal(specs[3]))
			spec, err := iterator.Next()
			Ω(spec).Should(BeNil())
			Ω(err).Should(MatchError(ErrClosed))
		})
	})
})
|
||||
47
vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
generated
vendored
Normal file
47
vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
generated
vendored
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
package spec_iterator
|
||||
|
||||
import "github.com/onsi/ginkgo/internal/spec"
|
||||
|
||||
// ShardedParallelIterator statically pre-assigns a contiguous shard of the spec
// list to one node (via ParallelizedIndexRange) and then iterates that shard,
// requiring no coordination with other nodes.
type ShardedParallelIterator struct {
	specs    []*spec.Spec // full spec list, identically ordered on every node
	index    int          // next spec to hand out within this node's shard
	maxIndex int          // one past the last spec in this node's shard
}
|
||||
|
||||
func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
|
||||
startIndex, count := ParallelizedIndexRange(len(specs), total, node)
|
||||
|
||||
return &ShardedParallelIterator{
|
||||
specs: specs,
|
||||
index: startIndex,
|
||||
maxIndex: startIndex + count,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
|
||||
if s.index >= s.maxIndex {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
|
||||
spec := s.specs[s.index]
|
||||
s.index += 1
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
// NumberOfSpecsPriorToIteration reports the size of the full spec list, not
// just this node's shard.
func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}
|
||||
|
||||
// NumberOfSpecsToProcessIfKnown is always known for a static shard: it is the
// number of specs remaining in this node's range.
func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return s.maxIndex - s.index, true
}
|
||||
|
||||
func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
|
||||
count := 0
|
||||
for i := s.index; i < s.maxIndex; i += 1 {
|
||||
if !s.specs[i].Skipped() && !s.specs[i].Pending() {
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
return count, true
|
||||
}
|
||||
62
vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go
generated
vendored
Normal file
62
vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
package spec_iterator_test
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo/internal/spec_iterator"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/codelocation"
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Specs for ShardedParallelIterator as node 1 of 2. With 4 specs split across
// 2 nodes, node 1 owns specs[0] and specs[1] (A pending, B runnable).
var _ = Describe("ShardedParallelSpecIterator", func() {
	var specs []*spec.Spec
	var iterator *ShardedParallelIterator

	// newSpec builds a minimal one-node spec with the given text and flag.
	newSpec := func(text string, flag types.FlagType) *spec.Spec {
		subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
		return spec.New(subject, []*containernode.ContainerNode{}, false)
	}

	BeforeEach(func() {
		specs = []*spec.Spec{
			newSpec("A", types.FlagTypePending),
			newSpec("B", types.FlagTypeNone),
			newSpec("C", types.FlagTypeNone),
			newSpec("D", types.FlagTypeNone),
		}
		specs[3].Skip()

		iterator = NewShardedParallelIterator(specs, 2, 1)
	})

	It("should report the total number of specs", func() {
		Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4))
	})

	It("should report the number to be processed", func() {
		// Node 1's shard is half of the 4 specs.
		n, known := iterator.NumberOfSpecsToProcessIfKnown()
		Ω(n).Should(Equal(2))
		Ω(known).Should(BeTrue())
	})

	It("should report the number that will be run", func() {
		// Only B runs: A is pending.
		n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown()
		Ω(n).Should(Equal(1))
		Ω(known).Should(BeTrue())
	})

	Describe("iterating", func() {
		It("should return the specs in order", func() {
			Ω(iterator.Next()).Should(Equal(specs[0]))
			Ω(iterator.Next()).Should(Equal(specs[1]))
			spec, err := iterator.Next()
			Ω(spec).Should(BeNil())
			Ω(err).Should(MatchError(ErrClosed))
		})
	})
})
|
||||
20
vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
generated
vendored
Normal file
20
vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
generated
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
package spec_iterator
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
)
|
||||
|
||||
// ErrClosed is returned by SpecIterator.Next once there are no more specs for
// this node to run.
var ErrClosed = errors.New("no more specs to run")

// SpecIterator abstracts the strategy for handing specs to a node: serial,
// statically sharded, or coordinated through the ginkgo server. The
// "...IfKnown" methods return false when the strategy cannot predict the value
// up front.
type SpecIterator interface {
	Next() (*spec.Spec, error)
	NumberOfSpecsPriorToIteration() int
	NumberOfSpecsToProcessIfKnown() (int, bool)
	NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
}

// Counter is the JSON payload exchanged with the ginkgo server's /counter
// endpoint; Index is the next global spec index to run.
type Counter struct {
	Index int `json:"index"`
}
|
||||
13
vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go
generated
vendored
Normal file
13
vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
package spec_iterator_test
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestSpecIterator hooks this package's ginkgo specs into `go test`.
func TestSpecIterator(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "SpecIterator Suite")
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue