Revision 2cffd85640c6781860ddc7c15f3e6a77be5ca96f authored by Ryan Drew on 20 November 2023, 23:31:59 UTC, committed by Jussi Mäki on 05 April 2024, 12:00:13 UTC
[ upstream commit efff613258445ca9986e1b6b74af07875f20099c ] This commit restructures the OpenShift installation instructions to point to the Red Hat Ecosystem Catalog, so users may find vendor maintained OLM images. The old installation instructions which refer to the deprecated cilium/cilium-olm repository will be moved to the isovalent/olm-for-cilium repository. Fixes: #24270 Signed-off-by: Ryan Drew <ryan.drew@isovalent.com> Signed-off-by: Jussi Maki <jussi@isovalent.com>
1 parent 0f41f5c
resolve_test.go
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"sync"
"testing"
"github.com/sirupsen/logrus"
. "gopkg.in/check.v1"
"github.com/cilium/cilium/pkg/checker"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/cache"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
testidentity "github.com/cilium/cilium/pkg/testutils/identity"
)
// Shared test fixtures: a single identity (ID 303) carrying the "k8s:foo"
// label, and an identity cache seeded with it. Note that identityCache is
// mutable package-level state: GenerateNumIdentities adds further entries.
var (
	fooLabel = labels.NewLabel("k8s:foo", "", "")
	lbls     = labels.Labels{
		"foo": fooLabel,
	}
	lblsArray   = lbls.LabelArray()
	fooIdentity = &identity.Identity{
		ID:         303,
		Labels:     lbls,
		LabelArray: lbls.LabelArray(),
	}
	identityCache = cache.IdentityCache{303: lblsArray}
)
// dummyEndpoint is a minimal mock used to observe how rule updates affect
// endpoint caches in bootstrapRepo. Only the methods below are mocked; the
// embedded Endpoint supplies the rest of the interface.
type dummyEndpoint struct {
	ID               uint16
	SecurityIdentity *identity.Identity
	Endpoint // Implement methods of the interface that need to mock out real behavior.
}
// GetID16 returns the endpoint's 16-bit identifier.
func (d *dummyEndpoint) GetID16() uint16 {
	id := d.ID
	return id
}
// IsHost reports whether this endpoint represents the local host; the
// dummy endpoint never does.
func (d *dummyEndpoint) IsHost() bool {
	const isHost = false
	return isHost
}
// GetSecurityIdentity returns the endpoint's security identity. The mock
// never fails, so the error is always nil.
func (d *dummyEndpoint) GetSecurityIdentity() (*identity.Identity, error) {
	var err error
	return d.SecurityIdentity, err
}
// GenerateNumIdentities adds numIdentities synthetic identities to the
// package-level identityCache, numbered from 1000 upward. Each identity
// carries one unique "foo<i>" label plus a fixed set of cluster, service
// account, namespace and app labels resembling a typical Kubernetes pod.
func GenerateNumIdentities(numIdentities int) {
	for n := 0; n < numIdentities; n++ {
		lblSet := labels.Labels{
			fmt.Sprintf("foo%d", n): labels.NewLabel(fmt.Sprintf("k8s:foo%d", n), "", ""),
			"k8s:io.cilium.k8s.policy.cluster=default":        labels.NewLabel("io.cilium.k8s.policy.cluster=default", "", labels.LabelSourceK8s),
			"k8s:io.cilium.k8s.policy.serviceaccount=default": labels.NewLabel("io.cilium.k8s.policy.serviceaccount=default", "", labels.LabelSourceK8s),
			"k8s:io.kubernetes.pod.namespace=monitoring":      labels.NewLabel("io.kubernetes.pod.namespace=monitoring", "", labels.LabelSourceK8s),
			"k8s:app=analytics-erneh":                         labels.NewLabel("app=analytics-erneh", "", labels.LabelSourceK8s),
		}
		// Offset by 1000 to stay clear of reserved/low numeric identities.
		numericID := identity.NumericIdentity(n + 1000)
		identityCache[numericID] = lblSet.LabelArray()
	}
}
// GenerateL3IngressRules returns numRules identical L3 ingress rules that
// select endpoints labeled "k8s:foo" and allow traffic from endpoints
// labeled "bar". Each rule is sanitized before being appended.
func GenerateL3IngressRules(numRules int) api.Rules {
	fooSelector := api.NewESFromLabels(labels.ParseSelectLabel("k8s:foo"))
	barSelector := api.NewESFromLabels(labels.ParseSelectLabel("bar"))
	// Change ingRule and rule in the for-loop below to change what type of rules
	// are added into the policy repository.
	ingRule := api.IngressRule{
		IngressCommonRule: api.IngressCommonRule{
			FromEndpoints: []api.EndpointSelector{barSelector},
		},
	}

	// Pre-size the slice; exactly numRules entries are appended below.
	rules := make(api.Rules, 0, numRules)
	for i := 1; i <= numRules; i++ {
		rule := api.Rule{
			EndpointSelector: fooSelector,
			Ingress:          []api.IngressRule{ingRule},
		}
		rule.Sanitize()
		rules = append(rules, &rule)
	}
	return rules
}
// GenerateL3EgressRules returns numRules identical L3 egress rules that
// select endpoints labeled "k8s:foo" and allow traffic to endpoints
// labeled "bar". Each rule is sanitized before being appended.
func GenerateL3EgressRules(numRules int) api.Rules {
	fooSelector := api.NewESFromLabels(labels.ParseSelectLabel("k8s:foo"))
	barSelector := api.NewESFromLabels(labels.ParseSelectLabel("bar"))
	// Change egRule and rule in the for-loop below to change what type of rules
	// are added into the policy repository.
	egRule := api.EgressRule{
		EgressCommonRule: api.EgressCommonRule{
			ToEndpoints: []api.EndpointSelector{barSelector},
		},
	}

	// Pre-size the slice; exactly numRules entries are appended below.
	rules := make(api.Rules, 0, numRules)
	for i := 1; i <= numRules; i++ {
		rule := api.Rule{
			EndpointSelector: fooSelector,
			Egress:           []api.EgressRule{egRule},
		}
		rule.Sanitize()
		rules = append(rules, &rule)
	}
	return rules
}
// GenerateCIDRRules returns numRules identical egress rules that select
// endpoints labeled "k8s:foo" and allow traffic to an IPv4 and an IPv6
// CIDR. Each rule is sanitized before being appended.
func GenerateCIDRRules(numRules int) api.Rules {
	fooSelector := api.NewESFromLabels(labels.ParseSelectLabel("k8s:foo"))
	//barSelector := api.NewESFromLabels(labels.ParseSelectLabel("bar"))

	// Change egRule and rule in the for-loop below to change what type of rules
	// are added into the policy repository.
	egRule := api.EgressRule{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDR: []api.CIDR{api.CIDR("10.2.3.0/24"), api.CIDR("ff02::/64")},
		},
		/*ToRequires:  []api.EndpointSelector{barSelector},
		ToPorts: []api.PortRule{
			{
				Ports: []api.PortProtocol{
					{
						Port:     "8080",
						Protocol: api.ProtoTCP,
					},
				},
			},
		},*/
	}

	// Pre-size the slice; exactly numRules entries are appended below.
	rules := make(api.Rules, 0, numRules)
	for i := 1; i <= numRules; i++ {
		rule := api.Rule{
			EndpointSelector: fooSelector,
			Egress:           []api.EgressRule{egRule},
		}
		rule.Sanitize()
		rules = append(rules, &rule)
	}
	return rules
}
// DummyOwner is a stub policy owner used when distilling policies in these
// tests; it returns fixed values for redirect ports, named ports and IDs.
type DummyOwner struct{}
// LookupRedirectPortLocked always reports the fixed test proxy port 4242.
func (d DummyOwner) LookupRedirectPortLocked(bool, string, uint16) uint16 {
	const proxyPort = 4242
	return proxyPort
}
// HasBPFPolicyMap pretends that a BPF policy map is always present.
func (d DummyOwner) HasBPFPolicyMap() bool {
	const hasMap = true
	return hasMap
}
// GetNamedPort resolves every named port to the fixed test port 80.
func (d DummyOwner) GetNamedPort(ingress bool, name string, proto uint8) uint16 {
	const httpPort = 80
	return httpPort
}
// GetNamedPortLocked resolves every named port to the fixed test port 80,
// mirroring GetNamedPort for the locked variant of the interface.
func (d DummyOwner) GetNamedPortLocked(ingress bool, name string, proto uint8) uint16 {
	const httpPort = 80
	return httpPort
}
// GetID returns the fixed owner ID 1234 used throughout these tests.
func (d DummyOwner) GetID() uint64 {
	const ownerID = 1234
	return ownerID
}
// PolicyDebug forwards policy debug messages to the package logger at Info
// level so they are visible in test output.
func (d DummyOwner) PolicyDebug(fields logrus.Fields, msg string) {
	log.WithFields(fields).Info(msg)
}
// bootstrapRepo builds a policy Repository populated with numRules rules
// produced by ruleGenFunc, backed by the shared testSelectorCache seeded
// with 3000 generated identities. It also verifies that the added rules
// select a dummy endpoint carrying fooIdentity: the endpoint must be moved
// out of the "to check" set and into the "to regenerate" set.
func bootstrapRepo(ruleGenFunc func(int) api.Rules, numRules int, c *C) *Repository {
	mgr := cache.NewCachingIdentityAllocator(&testidentity.IdentityAllocatorOwnerMock{})
	ids := mgr.GetIdentityCache()
	fakeAllocator := testidentity.NewMockIdentityAllocator(ids)
	testRepo := NewPolicyRepository(fakeAllocator, ids, nil)
	SetPolicyEnabled(option.DefaultEnforcement)
	// Fill the package-level identityCache and push it into the shared
	// selector cache before the repository starts resolving against it.
	GenerateNumIdentities(3000)
	wg := &sync.WaitGroup{}
	testSelectorCache.UpdateIdentities(identityCache, nil, wg)
	wg.Wait()
	testRepo.selectorCache = testSelectorCache
	rulez, _ := testRepo.AddList(ruleGenFunc(numRules))
	// One endpoint with fooIdentity; the generated rules select the "foo"
	// label, so the endpoint is expected to need regeneration.
	epSet := NewEndpointSet(map[Endpoint]struct{}{
		&dummyEndpoint{
			ID:               9001,
			SecurityIdentity: fooIdentity,
		}: {},
	})
	epsToRegen := NewEndpointSet(nil)
	wg = &sync.WaitGroup{}
	rulez.UpdateRulesEndpointsCaches(epSet, epsToRegen, wg)
	wg.Wait()
	c.Assert(epSet.Len(), Equals, 0)
	c.Assert(epsToRegen.Len(), Equals, 1)
	return testRepo
}
// BenchmarkRegenerateCIDRPolicyRules measures resolving and distilling the
// policy for fooIdentity against a repository of 1000 CIDR egress rules.
func (ds *PolicyTestSuite) BenchmarkRegenerateCIDRPolicyRules(c *C) {
	testRepo := bootstrapRepo(GenerateCIDRRules, 1000, c)
	// Exclude repository setup from the measured time.
	c.ResetTimer()
	for i := 0; i < c.N; i++ {
		ip, _ := testRepo.resolvePolicyLocked(fooIdentity)
		_ = ip.DistillPolicy(DummyOwner{}, false)
		ip.Detach()
	}
}
// BenchmarkRegenerateL3IngressPolicyRules measures resolving and distilling
// the policy for fooIdentity against 1000 L3 ingress rules.
func (ds *PolicyTestSuite) BenchmarkRegenerateL3IngressPolicyRules(c *C) {
	testRepo := bootstrapRepo(GenerateL3IngressRules, 1000, c)
	// Exclude repository setup from the measured time.
	c.ResetTimer()
	for i := 0; i < c.N; i++ {
		ip, _ := testRepo.resolvePolicyLocked(fooIdentity)
		_ = ip.DistillPolicy(DummyOwner{}, false)
		ip.Detach()
	}
}
// BenchmarkRegenerateL3EgressPolicyRules measures resolving and distilling
// the policy for fooIdentity against 1000 L3 egress rules.
func (ds *PolicyTestSuite) BenchmarkRegenerateL3EgressPolicyRules(c *C) {
	testRepo := bootstrapRepo(GenerateL3EgressRules, 1000, c)
	// Exclude repository setup from the measured time.
	c.ResetTimer()
	for i := 0; i < c.N; i++ {
		ip, _ := testRepo.resolvePolicyLocked(fooIdentity)
		_ = ip.DistillPolicy(DummyOwner{}, false)
		ip.Detach()
	}
}
// TestL7WithIngressWildcard verifies that an L7 HTTP ingress rule on port 80
// with no FromEndpoints resolves into an EndpointPolicy whose L4 filter
// wildcards the remote peer and redirects to the HTTP parser.
func (ds *PolicyTestSuite) TestL7WithIngressWildcard(c *C) {
	repo := bootstrapRepo(GenerateL3IngressRules, 1000, c)
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	// Shadows the package-level fooIdentity for the rest of this test.
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	// Allow GET /good on port 80 from anyone into id=foo endpoints.
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/good"},
						},
					},
				}},
			},
		},
	}
	rule1.Sanitize()
	_, _, err := repo.Add(rule1, []Endpoint{})
	c.Assert(err, IsNil)
	repo.Mutex.RLock()
	defer repo.Mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	c.Assert(err, IsNil)
	policy := selPolicy.DistillPolicy(DummyOwner{}, false)
	expectedEndpointPolicy := EndpointPolicy{
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: &L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4PolicyMap{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						wildcard: wildcardCachedSelector,
						L7Parser: ParserTypeHTTP,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							wildcardCachedSelector: &PerSelectorPolicy{
								L7Rules: api.L7Rules{
									HTTP: []api.PortRuleHTTP{{Method: "GET", Path: "/good"}},
								},
								CanShortCircuit: true,
								isRedirect:      true,
							},
						},
						DerivedFromRules: labels.LabelArrayList{nil},
					},
				},
				Egress:        L4PolicyMap{},
				redirectTypes: redirectTypeEnvoy,
			},
			IngressPolicyEnabled: true,
			EgressPolicyEnabled:  false,
		},
		PolicyOwner: DummyOwner{},
		// inherit this from the result as it is outside of the scope
		// of this test
		PolicyMapState: policy.PolicyMapState,
	}
	// Have to remove circular reference before testing to avoid an infinite loop
	policy.selectorPolicy.Detach()
	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	c.Assert(policy, checker.DeepEquals, &expectedEndpointPolicy)
}
// TestL7WithLocalHostWildcardd verifies that with AllowLocalhostAlways set
// (Kubernetes mode), the resolved L4 filter additionally contains a nil
// per-selector entry for the reserved host selector alongside the wildcard
// L7 HTTP entry. (Note: the trailing "dd" in the name is a historical typo,
// kept so the test name stays stable.)
func (ds *PolicyTestSuite) TestL7WithLocalHostWildcardd(c *C) {
	repo := bootstrapRepo(GenerateL3IngressRules, 1000, c)
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	// Shadows the package-level fooIdentity for the rest of this test.
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	// Emulate Kubernetes mode with allow from localhost
	oldLocalhostOpt := option.Config.AllowLocalhost
	option.Config.AllowLocalhost = option.AllowLocalhostAlways
	defer func() { option.Config.AllowLocalhost = oldLocalhostOpt }()
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	// Allow GET /good on port 80 from anyone into id=foo endpoints.
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/good"},
						},
					},
				}},
			},
		},
	}
	rule1.Sanitize()
	_, _, err := repo.Add(rule1, []Endpoint{})
	c.Assert(err, IsNil)
	repo.Mutex.RLock()
	defer repo.Mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	c.Assert(err, IsNil)
	policy := selPolicy.DistillPolicy(DummyOwner{}, false)
	// The host selector must have been cached as a side effect of the
	// allow-localhost option.
	cachedSelectorHost := testSelectorCache.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameHost])
	c.Assert(cachedSelectorHost, Not(IsNil))
	expectedEndpointPolicy := EndpointPolicy{
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: &L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4PolicyMap{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						wildcard: wildcardCachedSelector,
						L7Parser: ParserTypeHTTP,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							wildcardCachedSelector: &PerSelectorPolicy{
								L7Rules: api.L7Rules{
									HTTP: []api.PortRuleHTTP{{Method: "GET", Path: "/good"}},
								},
								CanShortCircuit: true,
								isRedirect:      true,
							},
							// Host traffic bypasses the L7 redirect (nil policy).
							cachedSelectorHost: nil,
						},
						DerivedFromRules: labels.LabelArrayList{nil},
					},
				},
				Egress:        L4PolicyMap{},
				redirectTypes: redirectTypeEnvoy,
			},
			IngressPolicyEnabled: true,
			EgressPolicyEnabled:  false,
		},
		PolicyOwner: DummyOwner{},
		// inherit this from the result as it is outside of the scope
		// of this test
		PolicyMapState: policy.PolicyMapState,
	}
	// Have to remove circular reference before testing to avoid an infinite loop
	policy.selectorPolicy.Detach()
	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	c.Assert(policy, checker.DeepEquals, &expectedEndpointPolicy)
}
// TestMapStateWithIngressWildcard checks that an L4-only ingress allow on
// port 80 (empty L7 rules) distills into a MapState with a wildcard allow
// for the port plus the implicit allow-all-egress entry, and that identity
// cache updates not matching any selector of the policy queue no
// incremental map changes.
func (ds *PolicyTestSuite) TestMapStateWithIngressWildcard(c *C) {
	repo := bootstrapRepo(GenerateL3IngressRules, 1000, c)
	ruleLabel := labels.ParseLabelArray("rule-foo-allow-port-80")
	ruleLabelAllowAnyEgress := labels.LabelArray{
		labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
	}
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	// Shadows the package-level fooIdentity for the rest of this test.
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	// L4-only rule: port 80/TCP with an empty L7 rule set.
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		Labels:           ruleLabel,
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{},
				}},
			},
		},
	}
	rule1.Sanitize()
	_, _, err := repo.Add(rule1, []Endpoint{})
	c.Assert(err, IsNil)
	repo.Mutex.RLock()
	defer repo.Mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	c.Assert(err, IsNil)
	policy := selPolicy.DistillPolicy(DummyOwner{}, false)
	rule1MapStateEntry := NewMapStateEntry(wildcardCachedSelector, labels.LabelArrayList{ruleLabel}, false, false, AuthTypeNone)
	allowEgressMapStateEntry := NewMapStateEntry(nil, labels.LabelArrayList{ruleLabelAllowAnyEgress}, false, false, AuthTypeNone)
	expectedEndpointPolicy := EndpointPolicy{
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: &L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4PolicyMap{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						wildcard: wildcardCachedSelector,
						L7Parser: ParserTypeNone,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							wildcardCachedSelector: nil,
						},
						DerivedFromRules: labels.LabelArrayList{ruleLabel},
					},
				},
				Egress: L4PolicyMap{},
			},
			IngressPolicyEnabled: true,
			EgressPolicyEnabled:  false,
		},
		PolicyOwner: DummyOwner{},
		PolicyMapState: MapState{
			{TrafficDirection: trafficdirection.Egress.Uint8()}: allowEgressMapStateEntry,
			{DestPort: 80, Nexthdr: 6}:                          rule1MapStateEntry,
		},
	}
	// Add new identity to test accumulation of MapChanges
	added1 := cache.IdentityCache{
		identity.NumericIdentity(192): labels.ParseSelectLabelArray("id=resolve_test_1"),
	}
	wg := &sync.WaitGroup{}
	testSelectorCache.UpdateIdentities(added1, nil, wg)
	wg.Wait()
	// The added identity matches no selector of this policy, so no
	// incremental changes may have been queued.
	c.Assert(policy.policyMapChanges.changes, HasLen, 0)
	// Have to remove circular reference before testing to avoid an infinite loop
	policy.selectorPolicy.Detach()
	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	c.Assert(policy, checker.Equals, &expectedEndpointPolicy)
}
// TestMapStateWithIngress exercises incremental MapState updates: a policy
// allowing port 80 from the "world" entity and from id=resolve_test_1
// endpoints (the latter requiring null auth) is distilled, identities are
// then added and removed from the selector cache, and the accumulated map
// changes are consumed and compared against the expected adds/deletes.
func (ds *PolicyTestSuite) TestMapStateWithIngress(c *C) {
	repo := bootstrapRepo(GenerateL3IngressRules, 1000, c)
	ruleLabel := labels.ParseLabelArray("rule-world-allow-port-80")
	ruleLabelAllowAnyEgress := labels.LabelArray{
		labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
	}
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	// Shadows the package-level fooIdentity for the rest of this test.
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	lblTest := labels.ParseLabel("id=resolve_test_1")
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		Labels:           ruleLabel,
		Ingress: []api.IngressRule{
			// Allow port 80 from the reserved "world" entity.
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEntities: []api.Entity{api.EntityWorld},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{},
				}},
			},
			// Allow port 80 from id=resolve_test_1 endpoints with null auth.
			{
				Auth: &api.Auth{
					Type: api.AuthTypeNull,
				},
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{
						api.NewESFromLabels(lblTest),
					},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{},
				}},
			},
		},
	}
	rule1.Sanitize()
	_, _, err := repo.Add(rule1, []Endpoint{})
	c.Assert(err, IsNil)
	repo.Mutex.RLock()
	defer repo.Mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	c.Assert(err, IsNil)
	policy := selPolicy.DistillPolicy(DummyOwner{}, false)
	// Add new identity to test accumulation of MapChanges
	added1 := cache.IdentityCache{
		identity.NumericIdentity(192): labels.ParseSelectLabelArray("id=resolve_test_1", "num=1"),
		identity.NumericIdentity(193): labels.ParseSelectLabelArray("id=resolve_test_1", "num=2"),
		identity.NumericIdentity(194): labels.ParseSelectLabelArray("id=resolve_test_1", "num=3"),
	}
	wg := &sync.WaitGroup{}
	testSelectorCache.UpdateIdentities(added1, nil, wg)
	// Cleanup the identities from the testSelectorCache
	defer testSelectorCache.UpdateIdentities(nil, added1, wg)
	wg.Wait()
	// All three identities match the test selector: three queued adds.
	c.Assert(policy.policyMapChanges.changes, HasLen, 3)
	deleted1 := cache.IdentityCache{
		identity.NumericIdentity(193): labels.ParseSelectLabelArray("id=resolve_test_1", "num=2"),
	}
	wg = &sync.WaitGroup{}
	testSelectorCache.UpdateIdentities(nil, deleted1, wg)
	wg.Wait()
	// Deleting identity 193 queues a fourth (delete) change.
	c.Assert(policy.policyMapChanges.changes, HasLen, 4)
	cachedSelectorWorld := testSelectorCache.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorld])
	c.Assert(cachedSelectorWorld, Not(IsNil))
	cachedSelectorTest := testSelectorCache.FindCachedIdentitySelector(api.NewESFromLabels(lblTest))
	c.Assert(cachedSelectorTest, Not(IsNil))
	rule1MapStateEntry := NewMapStateEntry(cachedSelectorTest, labels.LabelArrayList{ruleLabel}, false, false, AuthTypeNone)
	allowEgressMapStateEntry := NewMapStateEntry(nil, labels.LabelArrayList{ruleLabelAllowAnyEgress}, false, false, AuthTypeNone)
	expectedEndpointPolicy := EndpointPolicy{
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: &L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4PolicyMap{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						L7Parser: ParserTypeNone,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							cachedSelectorWorld: nil,
							cachedSelectorTest: &PerSelectorPolicy{
								Auth: &api.Auth{
									Type: api.AuthTypeNull,
								},
								CanShortCircuit: true,
							},
						},
						DerivedFromRules: labels.LabelArrayList{ruleLabel},
					},
				},
				Egress: L4PolicyMap{},
			},
			IngressPolicyEnabled: true,
			EgressPolicyEnabled:  false,
		},
		PolicyOwner: DummyOwner{},
		PolicyMapState: MapState{
			{TrafficDirection: trafficdirection.Egress.Uint8()}:                          allowEgressMapStateEntry,
			{Identity: uint32(identity.ReservedIdentityWorld), DestPort: 80, Nexthdr: 6}: rule1MapStateEntry.WithOwners(cachedSelectorWorld),
			{Identity: 192, DestPort: 80, Nexthdr: 6}:                                    rule1MapStateEntry.WithAuthType(AuthTypeNull),
			{Identity: 194, DestPort: 80, Nexthdr: 6}:                                    rule1MapStateEntry.WithAuthType(AuthTypeNull),
		},
	}
	// Have to remove circular reference before testing for Equality to avoid an infinite loop
	policy.selectorPolicy.Detach()
	// Verify that cached selector is not found after Detach().
	// Note that this depends on the other tests NOT using the same selector concurrently!
	cachedSelectorTest = testSelectorCache.FindCachedIdentitySelector(api.NewESFromLabels(lblTest))
	c.Assert(cachedSelectorTest, IsNil)
	adds, deletes := policy.ConsumeMapChanges()
	// maps on the policy got cleared
	c.Assert(policy.policyMapChanges.changes, IsNil)
	// Identity 193 was added and then deleted, so only 192 and 194 remain as
	// adds; 193 shows up as a delete.
	c.Assert(adds, checker.Equals, Keys{
		{Identity: 192, DestPort: 80, Nexthdr: 6}: {},
		{Identity: 194, DestPort: 80, Nexthdr: 6}: {},
	})
	c.Assert(deletes, checker.Equals, Keys{
		{Identity: 193, DestPort: 80, Nexthdr: 6}: {},
	})
	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	policy.policyMapChanges.mutex = lock.Mutex{}
	c.Assert(policy, checker.Equals, &expectedEndpointPolicy)
}
// TestEndpointPolicy_AllowsIdentity is a table-driven test of
// EndpointPolicy.AllowsIdentity, covering every combination of
// ingress/egress enforcement being enabled or disabled with allow and deny
// MapState entries for the queried identity.
func TestEndpointPolicy_AllowsIdentity(t *testing.T) {
	type fields struct {
		selectorPolicy *selectorPolicy
		PolicyMapState MapState
	}
	type args struct {
		identity identity.NumericIdentity
	}
	tests := []struct {
		name        string
		fields      fields
		args        args
		wantIngress bool
		wantEgress  bool
	}{
		// Enforcement disabled in both directions: everything is allowed.
		{
			name: "policy disabled",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: false,
					EgressPolicyEnabled:  false,
				},
				PolicyMapState: MapState{},
			},
			args: args{
				identity: 0,
			},
			wantIngress: true,
			wantEgress:  true,
		},
		// Enforcement enabled with an empty map: default deny both ways.
		{
			name: "policy enabled",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: MapState{},
			},
			args: args{
				identity: 0,
			},
			wantIngress: false,
			wantEgress:  false,
		},
		// An ingress allow entry permits ingress only.
		{
			name: "policy enabled for ingress",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: MapState{
					Key{
						Identity:         0,
						DestPort:         0,
						Nexthdr:          0,
						TrafficDirection: trafficdirection.Ingress.Uint8(),
					}: MapStateEntry{},
				},
			},
			args: args{
				identity: 0,
			},
			wantIngress: true,
			wantEgress:  false,
		},
		// An egress allow entry permits egress only.
		{
			name: "policy enabled for egress",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: MapState{
					Key{
						Identity:         0,
						DestPort:         0,
						Nexthdr:          0,
						TrafficDirection: trafficdirection.Egress.Uint8(),
					}: MapStateEntry{},
				},
			},
			args: args{
				identity: 0,
			},
			wantIngress: false,
			wantEgress:  true,
		},
		// A deny entry blocks ingress even though enforcement would
		// otherwise consult the entry as an allow.
		{
			name: "policy enabled for ingress with deny policy",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: MapState{
					Key{
						Identity:         0,
						DestPort:         0,
						Nexthdr:          0,
						TrafficDirection: trafficdirection.Ingress.Uint8(),
					}: MapStateEntry{IsDeny: true},
				},
			},
			args: args{
				identity: 0,
			},
			wantIngress: false,
			wantEgress:  false,
		},
		// With ingress enforcement disabled, a stale ingress deny entry is
		// ignored and ingress stays allowed.
		{
			name: "policy disabled for ingress with deny policy",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: false,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: MapState{
					Key{
						Identity:         0,
						DestPort:         0,
						Nexthdr:          0,
						TrafficDirection: trafficdirection.Ingress.Uint8(),
					}: MapStateEntry{IsDeny: true},
				},
			},
			args: args{
				identity: 0,
			},
			wantIngress: true,
			wantEgress:  false,
		},
		// A deny entry blocks egress under enforcement.
		{
			name: "policy enabled for egress with deny policy",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: MapState{
					Key{
						Identity:         0,
						DestPort:         0,
						Nexthdr:          0,
						TrafficDirection: trafficdirection.Egress.Uint8(),
					}: MapStateEntry{IsDeny: true},
				},
			},
			args: args{
				identity: 0,
			},
			wantIngress: false,
			wantEgress:  false,
		},
		// With egress enforcement disabled, a stale egress deny entry is
		// ignored and egress stays allowed.
		{
			name: "policy disabled for egress with deny policy",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  false,
				},
				PolicyMapState: MapState{
					Key{
						Identity:         0,
						DestPort:         0,
						Nexthdr:          0,
						TrafficDirection: trafficdirection.Egress.Uint8(),
					}: MapStateEntry{IsDeny: true},
				},
			},
			args: args{
				identity: 0,
			},
			wantIngress: false,
			wantEgress:  true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &EndpointPolicy{
				selectorPolicy: tt.fields.selectorPolicy,
				PolicyMapState: tt.fields.PolicyMapState,
			}
			gotIngress, gotEgress := p.AllowsIdentity(tt.args.identity)
			if gotIngress != tt.wantIngress {
				t.Errorf("AllowsIdentity() gotIngress = %v, want %v", gotIngress, tt.wantIngress)
			}
			if gotEgress != tt.wantEgress {
				t.Errorf("AllowsIdentity() gotEgress = %v, want %v", gotEgress, tt.wantEgress)
			}
		})
	}
}
Computing file changes ...