Support Service loadBalancerSourceRanges in AntreaProxy
For antrea-io#5493

This commit introduces support for the loadBalancerSourceRanges field of LoadBalancer
Services in AntreaProxy.

Here is an example of a LoadBalancer Service configuration that allows access only from
specific CIDRs (a sketch of the equivalent source-IP check follows the manifest):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: sample-loadbalancer-source-ranges
spec:
  selector:
    app: web
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  type: LoadBalancer
  loadBalancerSourceRanges:
    - "192.168.77.0/24"
    - "192.168.78.0/24"
status:
  loadBalancer:
    ingress:
      - ip: 192.168.77.150
```
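
Functionally, `loadBalancerSourceRanges` means that traffic to the LoadBalancer ingress IP
is load balanced only when the client source IP falls within one of the listed CIDRs, and is
dropped otherwise. A minimal Go sketch of that check (the client IPs below are hypothetical
examples):

```go
package main

import (
	"fmt"
	"net"
)

// allowedBySourceRanges reports whether clientIP falls within any of the
// Service's loadBalancerSourceRanges CIDRs.
func allowedBySourceRanges(clientIP net.IP, sourceRanges []string) bool {
	for _, r := range sourceRanges {
		if _, cidr, err := net.ParseCIDR(r); err == nil && cidr.Contains(clientIP) {
			return true
		}
	}
	return false
}

func main() {
	sourceRanges := []string{"192.168.77.0/24", "192.168.78.0/24"}
	// Hypothetical clients: one inside an allowed range, one outside.
	for _, c := range []string{"192.168.77.10", "10.0.0.5"} {
		fmt.Printf("%s allowed=%v\n", c, allowedBySourceRanges(net.ParseIP(c), sourceRanges))
	}
}
```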

Here are the new flows installed for this Service:

```text
1. table=ServiceMark, priority=200,tcp,nw_src=192.168.77.0/24,nw_dst=192.168.77.150,tp_dst=80 actions=set_field:0x20000000/0x60000000->reg4
2. table=ServiceMark, priority=200,tcp,nw_src=192.168.78.0/24,nw_dst=192.168.77.150,tp_dst=80 actions=set_field:0x20000000/0x60000000->reg4
3. table=ServiceMark, priority=190,tcp,nw_dst=192.168.77.150,tp_dst=80 actions=set_field:0x40000000/0x60000000->reg4
4. table=ServiceLB, priority=200,tcp,reg4=0x20010000/0x60070000,nw_dst=192.168.77.150,tp_dst=80 actions=set_field:0x200/0x200->reg0,set_field:0x20000/0x70000->reg4,set_field:0xe->reg7,group:14
5. table=ServiceLB, priority=190,reg4=0x40000000/0x60000000 actions=drop
```

- Flow 1 matches packets from the allowed CIDR `192.168.77.0/24` and marks them with
  `LoadBalancerSourceRangesAllowRegMark`.
- Flow 2 is the same as flow 1, but for the allowed CIDR `192.168.78.0/24`.
- Flow 3, at a lower priority, matches packets that are not from any allowed CIDR and
  marks them with `LoadBalancerSourceRangesDropRegMark`.
- Flow 4 matches packets carrying `LoadBalancerSourceRangesAllowRegMark` and performs
  load balancing.
- Flow 5 matches packets carrying `LoadBalancerSourceRangesDropRegMark` and drops them
  (see the sketch below for how these reg4 values are derived).
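
For reference, the `0x20000000/0x60000000` and `0x40000000/0x60000000` values in flows 1-3
come from a 2-bit field at reg4[29..30] (defined in the `fields.go` change below), where
`0b01` means allow and `0b10` means drop. A minimal Go sketch of that arithmetic, assuming
nothing beyond the bit layout introduced here:

```go
package main

import "fmt"

func main() {
	// reg4[29..30] holds the loadBalancerSourceRanges verdict.
	const offset = 29
	mask := uint32(0b11) << offset // field mask: 0x60000000

	allow := uint32(0b01) << offset // LoadBalancerSourceRangesAllowRegMark: 0x20000000
	drop := uint32(0b10) << offset  // LoadBalancerSourceRangesDropRegMark:  0x40000000

	fmt.Printf("allow mark: 0x%x/0x%x\n", allow, mask) // 0x20000000/0x60000000
	fmt.Printf("drop mark:  0x%x/0x%x\n", drop, mask)  // 0x40000000/0x60000000
}
```

Using a dedicated 2-bit field keeps the allow/drop verdict independent of the other Service
state already stored in reg4.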

Signed-off-by: Hongliang Liu <[email protected]>
hongliangl committed Sep 30, 2024
1 parent a996421 commit c76dd8e
Showing 16 changed files with 619 additions and 221 deletions.
1 change: 1 addition & 0 deletions cmd/antrea-agent/agent.go
@@ -182,6 +182,7 @@ func run(o *Options) error {
enableFlowExporter,
o.config.AntreaProxy.ProxyAll,
features.DefaultFeatureGate.Enabled(features.LoadBalancerModeDSR),
*o.config.AntreaProxy.ProxyLoadBalancerIPs,
connectUplinkToBridge,
multicastEnabled,
features.DefaultFeatureGate.Enabled(features.TrafficControl),
197 changes: 115 additions & 82 deletions docs/design/ovs-pipeline.md

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions pkg/agent/openflow/client.go
@@ -799,6 +799,9 @@ func (c *client) InstallServiceFlows(config *types.ServiceConfig) error {
if config.IsDSR {
flows = append(flows, c.featureService.dsrServiceMarkFlow(config))
}
if len(config.LoadBalancerSourceRanges) != 0 {
flows = append(flows, c.featureService.loadBalancerSourceRangesMarkFlows(config)...)
}
cacheKey := generateServicePortFlowCacheKey(config.ServiceIP, config.ServicePort, config.Protocol)
return c.addFlows(c.featureService.cachedFlows, cacheKey, flows)
}
@@ -940,6 +943,7 @@ func (c *client) generatePipelines() {
c.enableProxy,
c.proxyAll,
c.enableDSR,
c.proxyLoadBalancerIPs,
c.connectUplinkToBridge)
c.activatedFeatures = append(c.activatedFeatures, c.featureService)
c.traceableFeatures = append(c.traceableFeatures, c.featureService)
113 changes: 77 additions & 36 deletions pkg/agent/openflow/client_test.go
@@ -88,6 +88,7 @@ type clientOptions struct {
enableEgressTrafficShaping bool
proxyAll bool
enableDSR bool
proxyLoadBalancerIPs bool
connectUplinkToBridge bool
enableMulticast bool
enableTrafficControl bool
@@ -129,6 +130,10 @@ func disableEgress(o *clientOptions) {
o.enableEgress = false
}

func disableProxyLoadBalancerIPs(o *clientOptions) {
o.proxyLoadBalancerIPs = false
}

func enableEgressTrafficShaping(o *clientOptions) {
// traffic shaping requires meter support
o.enableOVSMeters = true
@@ -393,9 +398,10 @@ func newFakeClientWithBridge(
) *client {
// default options
o := &clientOptions{
enableProxy: true,
enableAntreaPolicy: true,
enableEgress: true,
enableProxy: true,
enableAntreaPolicy: true,
enableEgress: true,
proxyLoadBalancerIPs: true,
}
for _, fn := range options {
fn(o)
@@ -412,6 +418,7 @@ func newFakeClientWithBridge(
false,
o.proxyAll,
o.enableDSR,
o.proxyLoadBalancerIPs,
o.connectUplinkToBridge,
o.enableMulticast,
o.enableTrafficControl,
@@ -1017,8 +1024,8 @@ func Test_client_GetPodFlowKeys(t *testing.T) {
"table=1,priority=200,arp,in_port=11,arp_spa=10.10.0.11,arp_sha=00:00:10:10:00:11",
"table=3,priority=190,in_port=11",
"table=4,priority=200,ip,in_port=11,dl_src=00:00:10:10:00:11,nw_src=10.10.0.11",
"table=17,priority=200,ip,reg0=0x200/0x200,nw_dst=10.10.0.11",
"table=22,priority=200,dl_dst=00:00:10:10:00:11",
"table=18,priority=200,ip,reg0=0x200/0x200,nw_dst=10.10.0.11",
"table=23,priority=200,dl_dst=00:00:10:10:00:11",
}
assert.ElementsMatch(t, expectedFlowKeys, flowKeys)
}
@@ -1254,17 +1261,18 @@ func Test_client_InstallServiceFlows(t *testing.T) {
port := uint16(80)

testCases := []struct {
name string
trafficPolicyLocal bool
protocol binding.Protocol
svcIP net.IP
affinityTimeout uint16
isExternal bool
isNodePort bool
isNested bool
isDSR bool
enableMulticluster bool
expectedFlows []string
name string
trafficPolicyLocal bool
protocol binding.Protocol
svcIP net.IP
affinityTimeout uint16
isExternal bool
isNodePort bool
isNested bool
isDSR bool
enableMulticluster bool
loadBalancerSourceRanges []string
expectedFlows []string
}{
{
name: "Service ClusterIP",
@@ -1449,6 +1457,38 @@ func Test_client_InstallServiceFlows(t *testing.T) {
"cookie=0x1030000000064, table=DSRServiceMark, priority=200,tcp6,reg4=0xc000000/0xe000000,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=learn(table=SessionAffinity,idle_timeout=160,fin_idle_timeout=5,priority=210,delete_learned,cookie=0x1030000000064,eth_type=0x86dd,nw_proto=0x6,OXM_OF_TCP_SRC[],OXM_OF_TCP_DST[],NXM_NX_IPV6_SRC[],NXM_NX_IPV6_DST[],load:NXM_NX_REG4[0..15]->NXM_NX_REG4[0..15],load:0x2->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG4[25],load:NXM_NX_XXREG3[]->NXM_NX_XXREG3[]),set_field:0x2000000/0x2000000->reg4,goto_table:EndpointDNAT",
},
},
{
name: "Service LoadBalancer,LoadBalancerSourceRanges,SessionAffinity,Short-circuiting",
protocol: binding.ProtocolSCTP,
svcIP: svcIPv4,
affinityTimeout: uint16(100),
isExternal: true,
trafficPolicyLocal: true,
loadBalancerSourceRanges: []string{"192.168.1.0/24", "192.168.2.0/24"},
expectedFlows: []string{
"cookie=0x1030000000000, table=ServiceMark, priority=200,sctp,nw_src=192.168.1.0/24,nw_dst=10.96.0.100,tp_dst=80 actions=set_field:0x20000000/0x60000000->reg4",
"cookie=0x1030000000000, table=ServiceMark, priority=200,sctp,nw_src=192.168.2.0/24,nw_dst=10.96.0.100,tp_dst=80 actions=set_field:0x20000000/0x60000000->reg4",
"cookie=0x1030000000000, table=ServiceMark, priority=190,sctp,nw_dst=10.96.0.100,tp_dst=80 actions=set_field:0x40000000/0x60000000->reg4",
"cookie=0x1030000000000, table=ServiceLB, priority=210,sctp,reg4=0x30010000/0x70070000,nw_dst=10.96.0.100,tp_dst=80 actions=set_field:0x200/0x200->reg0,set_field:0x30000/0x70000->reg4,set_field:0x200000/0x200000->reg4,set_field:0x64->reg7,group:100",
"cookie=0x1030000000000, table=ServiceLB, priority=200,sctp,reg4=0x20010000/0x60070000,nw_dst=10.96.0.100,tp_dst=80 actions=set_field:0x200/0x200->reg0,set_field:0x30000/0x70000->reg4,set_field:0x200000/0x200000->reg4,set_field:0x65->reg7,group:101",
"cookie=0x1030000000065, table=ServiceLB, priority=190,sctp,reg4=0x30000/0x70000,nw_dst=10.96.0.100,tp_dst=80 actions=learn(table=SessionAffinity,hard_timeout=100,priority=200,delete_learned,cookie=0x1030000000065,eth_type=0x800,nw_proto=0x84,OXM_OF_SCTP_DST[],NXM_OF_IP_DST[],NXM_OF_IP_SRC[],load:NXM_NX_REG4[0..15]->NXM_NX_REG4[0..15],load:NXM_NX_REG4[26]->NXM_NX_REG4[26],load:NXM_NX_REG3[]->NXM_NX_REG3[],load:0x2->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG0[9],load:0x1->NXM_NX_REG4[21]),set_field:0x20000/0x70000->reg4,goto_table:EndpointDNAT",
},
},
{
name: "Service LoadBalancer,LoadBalancerSourceRanges,IPv6,SessionAffinity",
protocol: binding.ProtocolSCTPv6,
svcIP: svcIPv6,
affinityTimeout: uint16(100),
isExternal: true,
loadBalancerSourceRanges: []string{"fec0:192:168:1::/64", "fec0:192:168:2::/64"},
expectedFlows: []string{
"cookie=0x1030000000000, table=ServiceMark, priority=200,sctp6,ipv6_src=fec0:192:168:1::/64,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=set_field:0x20000000/0x60000000->reg4",
"cookie=0x1030000000000, table=ServiceMark, priority=200,sctp6,ipv6_src=fec0:192:168:2::/64,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=set_field:0x20000000/0x60000000->reg4",
"cookie=0x1030000000000, table=ServiceMark, priority=190,sctp6,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=set_field:0x40000000/0x60000000->reg4",
"cookie=0x1030000000000, table=ServiceLB, priority=200,sctp6,reg4=0x20010000/0x60070000,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=set_field:0x200/0x200->reg0,set_field:0x30000/0x70000->reg4,set_field:0x200000/0x200000->reg4,set_field:0x64->reg7,group:100",
"cookie=0x1030000000064, table=ServiceLB, priority=190,sctp6,reg4=0x30000/0x70000,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=learn(table=SessionAffinity,hard_timeout=100,priority=200,delete_learned,cookie=0x1030000000064,eth_type=0x86dd,nw_proto=0x84,OXM_OF_SCTP_DST[],NXM_NX_IPV6_DST[],NXM_NX_IPV6_SRC[],load:NXM_NX_REG4[0..15]->NXM_NX_REG4[0..15],load:NXM_NX_REG4[26]->NXM_NX_REG4[26],load:NXM_NX_XXREG3[]->NXM_NX_XXREG3[],load:0x2->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG0[9],load:0x1->NXM_NX_REG4[21]),set_field:0x20000/0x70000->reg4,goto_table:EndpointDNAT",
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
@@ -1471,17 +1511,18 @@ func Test_client_InstallServiceFlows(t *testing.T) {
cacheKey := generateServicePortFlowCacheKey(tc.svcIP, port, tc.protocol)

assert.NoError(t, fc.InstallServiceFlows(&types.ServiceConfig{
ServiceIP: tc.svcIP,
ServicePort: port,
Protocol: tc.protocol,
TrafficPolicyLocal: tc.trafficPolicyLocal,
LocalGroupID: localGroupID,
ClusterGroupID: clusterGroupID,
AffinityTimeout: tc.affinityTimeout,
IsExternal: tc.isExternal,
IsNodePort: tc.isNodePort,
IsNested: tc.isNested,
IsDSR: tc.isDSR,
ServiceIP: tc.svcIP,
ServicePort: port,
Protocol: tc.protocol,
TrafficPolicyLocal: tc.trafficPolicyLocal,
LocalGroupID: localGroupID,
ClusterGroupID: clusterGroupID,
AffinityTimeout: tc.affinityTimeout,
IsExternal: tc.isExternal,
IsNodePort: tc.isNodePort,
IsNested: tc.isNested,
IsDSR: tc.isDSR,
LoadBalancerSourceRanges: tc.loadBalancerSourceRanges,
}))
fCacheI, ok := fc.featureService.cachedFlows.Load(cacheKey)
require.True(t, ok)
@@ -1527,11 +1568,11 @@ func Test_client_GetServiceFlowKeys(t *testing.T) {
assert.NoError(t, fc.InstallEndpointFlows(bindingProtocol, endpoints))
flowKeys := fc.GetServiceFlowKeys(svcIP, svcPort, bindingProtocol, endpoints)
expectedFlowKeys := []string{
"table=11,priority=200,tcp,reg4=0x10000/0x70000,nw_dst=10.96.0.224,tp_dst=80",
"table=11,priority=190,tcp,reg4=0x30000/0x70000,nw_dst=10.96.0.224,tp_dst=80",
"table=12,priority=200,tcp,reg3=0xa0a000b,reg4=0x20050/0x7ffff",
"table=12,priority=200,tcp,reg3=0xa0a000c,reg4=0x20050/0x7ffff",
"table=20,priority=190,ct_state=+new+trk,ip,nw_src=10.10.0.12,nw_dst=10.10.0.12",
"table=12,priority=200,tcp,reg4=0x10000/0x70000,nw_dst=10.96.0.224,tp_dst=80",
"table=12,priority=190,tcp,reg4=0x30000/0x70000,nw_dst=10.96.0.224,tp_dst=80",
"table=13,priority=200,tcp,reg3=0xa0a000b,reg4=0x20050/0x7ffff",
"table=13,priority=200,tcp,reg3=0xa0a000c,reg4=0x20050/0x7ffff",
"table=21,priority=190,ct_state=+new+trk,ip,nw_src=10.10.0.12,nw_dst=10.10.0.12",
}
assert.ElementsMatch(t, expectedFlowKeys, flowKeys)
}
@@ -2031,7 +2072,7 @@ func Test_client_setBasePacketOutBuilder(t *testing.T) {
}

func prepareSetBasePacketOutBuilder(ctrl *gomock.Controller, success bool) *client {
ofClient := NewClient(bridgeName, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, true, false, false, false, false, false, false, false, false, false, false, false, nil, false, defaultPacketInRate)
ofClient := NewClient(bridgeName, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, true, false, false, false, false, false, false, false, true, false, false, false, false, nil, false, defaultPacketInRate)
m := ovsoftest.NewMockBridge(ctrl)
ofClient.bridge = m
bridge := binding.OFBridge{}
@@ -2721,7 +2762,7 @@ func Test_client_ReplayFlows(t *testing.T) {
expectedFlows = append(expectedFlows, multicastInitFlows(true)...)
expectedFlows = append(expectedFlows, networkPolicyInitFlows(true, false, false)...)
expectedFlows = append(expectedFlows, podConnectivityInitFlows(config.TrafficEncapModeEncap, config.TrafficEncryptionModeNone, false, true, true, true)...)
expectedFlows = append(expectedFlows, serviceInitFlows(true, true, false, false)...)
expectedFlows = append(expectedFlows, serviceInitFlows(true, true, false, false, true)...)

addFlowInCache := func(cache *flowCategoryCache, cacheKey string, flows []binding.Flow) {
fCache := flowMessageCache{}
@@ -2787,8 +2828,8 @@ func Test_client_ReplayFlows(t *testing.T) {
"cookie=0x1020000000000, table=IngressMetric, priority=200,reg0=0x400/0x400,reg3=0xf actions=drop",
)
replayedFlows = append(replayedFlows,
"cookie=0x1020000000000, table=IngressRule, priority=200,conj_id=15 actions=set_field:0xf->reg3,set_field:0x400/0x400->reg0,set_field:0x800/0x1800->reg0,set_field:0x2000000/0xfe000000->reg0,set_field:0x1b/0xff->reg2,group:4",
"cookie=0x1020000000000, table=IngressDefaultRule, priority=200,reg1=0x64 actions=set_field:0x800/0x1800->reg0,set_field:0x2000000/0xfe000000->reg0,set_field:0x400000/0x600000->reg0,set_field:0x1c/0xff->reg2,goto_table:Output",
"cookie=0x1020000000000, table=IngressRule, priority=200,conj_id=15 actions=set_field:0xf->reg3,set_field:0x400/0x400->reg0,set_field:0x800/0x1800->reg0,set_field:0x2000000/0xfe000000->reg0,set_field:0x1c/0xff->reg2,group:4",
"cookie=0x1020000000000, table=IngressDefaultRule, priority=200,reg1=0x64 actions=set_field:0x800/0x1800->reg0,set_field:0x2000000/0xfe000000->reg0,set_field:0x400000/0x600000->reg0,set_field:0x1d/0xff->reg2,goto_table:Output",
)

// Feature Pod connectivity replays flows.
5 changes: 5 additions & 0 deletions pkg/agent/openflow/fields.go
@@ -149,6 +149,11 @@ var (
FromExternalRegMark = binding.NewOneBitRegMark(4, 27)
// reg4[28]: Mark to indicate that whether the traffic's source is a local Pod or the Node.
FromLocalRegMark = binding.NewOneBitRegMark(4, 28)
// reg4[29..30]: Field to store how to do with the traffic of LoadBalancer Services configured with
// `loadBalancerSourceRanges`. Marks in this field include:
LoadBalancerSourceRangesActionField = binding.NewRegField(4, 29, 30)
LoadBalancerSourceRangesAllowRegMark = binding.NewRegMark(LoadBalancerSourceRangesActionField, 0b01)
LoadBalancerSourceRangesDropRegMark = binding.NewRegMark(LoadBalancerSourceRangesActionField, 0b10)

// reg5(NXM_NX_REG5)
// Field to cache the Egress conjunction ID hit by TraceFlow packet.
4 changes: 1 addition & 3 deletions pkg/agent/openflow/framework.go
@@ -254,6 +254,7 @@ func (f *featureService) getRequiredTables() []*Table {
tables := []*Table{
UnSNATTable,
PreRoutingClassifierTable,
ServiceMarkTable,
SessionAffinityTable,
ServiceLBTable,
EndpointDNATTable,
@@ -263,9 +264,6 @@
ConntrackCommitTable,
OutputTable,
}
if f.proxyAll {
tables = append(tables, NodePortMarkTable)
}
if f.enableDSR {
tables = append(tables, DSRServiceMarkTable)
}
4 changes: 2 additions & 2 deletions pkg/agent/openflow/framework_test.go
@@ -407,7 +407,7 @@ func TestBuildPipeline(t *testing.T) {
},
},
{
name: "K8s Node, IPv4 only, with proxyAll enabled",
name: "K8s Node, IPv4 only, with AntreaProxy enabled",
ipStack: ipv4Only,
features: []feature{
newTestFeaturePodConnectivity(ipStackMap[ipv4Only]),
@@ -426,7 +426,7 @@
ConntrackTable,
ConntrackStateTable,
PreRoutingClassifierTable,
NodePortMarkTable,
ServiceMarkTable,
SessionAffinityTable,
ServiceLBTable,
EndpointDNATTable,
42 changes: 38 additions & 4 deletions pkg/agent/openflow/pipeline.go
@@ -139,7 +139,7 @@ var (
// Tables in stagePreRouting:
// When proxy is enabled.
PreRoutingClassifierTable = newTable("PreRoutingClassifier", stagePreRouting, pipelineIP)
NodePortMarkTable = newTable("NodePortMark", stagePreRouting, pipelineIP)
ServiceMarkTable = newTable("ServiceMark", stagePreRouting, pipelineIP)
SessionAffinityTable = newTable("SessionAffinity", stagePreRouting, pipelineIP)
ServiceLBTable = newTable("ServiceLB", stagePreRouting, pipelineIP)
DSRServiceMarkTable = newTable("DSRServiceMark", stagePreRouting, pipelineIP)
@@ -391,6 +391,7 @@ type flowCategoryCache struct {
type client struct {
enableProxy bool
proxyAll bool
proxyLoadBalancerIPs bool
enableDSR bool
enableAntreaPolicy bool
enableL7NetworkPolicy bool
@@ -2288,7 +2289,7 @@ func (f *featureService) nodePortMarkFlows() []binding.Flow {
continue
}
flows = append(flows,
NodePortMarkTable.ofTable.BuildFlow(priorityNormal).
ServiceMarkTable.ofTable.BuildFlow(priorityNormal).
Cookie(cookieID).
MatchProtocol(ipProtocol).
MatchDstIP(nodePortAddresses[i]).
@@ -2298,7 +2299,7 @@
// This generates the flow for the virtual NodePort DNAT IP. The flow is used to mark the first packet of NodePort
// connection sourced from the Antrea gateway (the connection is performed DNAT with the virtual IP in host netns).
flows = append(flows,
NodePortMarkTable.ofTable.BuildFlow(priorityNormal).
ServiceMarkTable.ofTable.BuildFlow(priorityNormal).
Cookie(cookieID).
MatchProtocol(ipProtocol).
MatchDstIP(f.virtualNodePortDNATIPs[ipProtocol]).
@@ -2381,6 +2382,9 @@ func (f *featureService) serviceLBFlows(config *types.ServiceConfig) []binding.F
} else {
flowBuilder = flowBuilder.MatchDstIP(config.ServiceIP)
}
if len(config.LoadBalancerSourceRanges) != 0 {
flowBuilder = flowBuilder.MatchRegMark(LoadBalancerSourceRangesAllowRegMark)
}
if extraMatcher != nil {
flowBuilder = extraMatcher(flowBuilder)
}
@@ -2827,6 +2831,7 @@ func NewClient(bridgeName string,
enableDenyTracking bool,
proxyAll bool,
enableDSR bool,
proxyLoadBalancerIPs bool,
connectUplinkToBridge bool,
enableMulticast bool,
enableTrafficControl bool,
@@ -2843,6 +2848,7 @@
enableProxy: enableProxy,
proxyAll: proxyAll,
enableDSR: enableDSR,
proxyLoadBalancerIPs: proxyLoadBalancerIPs,
enableAntreaPolicy: enableAntreaPolicy,
enableL7NetworkPolicy: enableL7NetworkPolicy,
enableDenyTracking: enableDenyTracking,
@@ -3013,7 +3019,7 @@ func (f *featureService) preRoutingClassifierFlows() []binding.Flow {

targetTables := []uint8{SessionAffinityTable.GetID(), ServiceLBTable.GetID()}
if f.proxyAll {
targetTables = append([]uint8{NodePortMarkTable.GetID()}, targetTables...)
targetTables = append([]uint8{ServiceMarkTable.GetID()}, targetTables...)
}
for _, ipProtocol := range f.ipProtocols {
flows = append(flows,
@@ -3106,6 +3112,34 @@ func (f *featureService) gatewaySNATFlows() []binding.Flow {
return flows
}

func (f *featureService) loadBalancerSourceRangesMarkFlows(config *types.ServiceConfig) []binding.Flow {
cookieID := f.cookieAllocator.Request(f.category).Raw()
protocol := config.Protocol
ingressIP := config.ServiceIP
port := config.ServicePort
var flows []binding.Flow
for _, srcRange := range config.LoadBalancerSourceRanges {
_, srcIPNet, _ := net.ParseCIDR(srcRange)
flows = append(flows, ServiceMarkTable.ofTable.BuildFlow(priorityNormal).
Cookie(cookieID).
MatchProtocol(protocol).
MatchSrcIPNet(*srcIPNet).
MatchDstIP(ingressIP).
MatchDstPort(port, nil).
Action().LoadRegMark(LoadBalancerSourceRangesAllowRegMark).
Done(),
)
}
flows = append(flows, ServiceMarkTable.ofTable.BuildFlow(priorityLow).
Cookie(cookieID).
MatchProtocol(protocol).
MatchDstIP(ingressIP).
MatchDstPort(port, nil).
Action().LoadRegMark(LoadBalancerSourceRangesDropRegMark).
Done())
return flows
}

func getCachedFlowMessages(cache *flowCategoryCache) []*openflow15.FlowMod {
var flows []*openflow15.FlowMod
cache.Range(func(key, value interface{}) bool {