diff --git a/docs/design/ovs-pipeline.md b/docs/design/ovs-pipeline.md index 5f3aecd2f07..8441dd064c1 100644 --- a/docs/design/ovs-pipeline.md +++ b/docs/design/ovs-pipeline.md @@ -128,7 +128,7 @@ to the registers we use. | | | | 0b01 | DispositionDropRegMark | Indicates Antrea NetworkPolicy disposition: drop. | | | | | 0b11 | DispositionPassRegMark | Indicates Antrea NetworkPolicy disposition: pass. | | | bit 13 | | 0b1 | GeneratedRejectPacketOutRegMark | Indicates packet is a generated reject response packet-out. | -| | bit 14 | | 0b1 | SvcNoEpRegMark | Indicates packet towards a Service without Endpoint. | +| | bit 14 | | 0b1 | SvcRejectRegMark | Indicates packet towards a Service should be rejected. | | | bit 19 | | 0b1 | RemoteSNATRegMark | Indicates packet needs SNAT on a remote Node. | | | bit 22 | | 0b1 | L7NPRedirectRegMark | Indicates L7 Antrea NetworkPolicy disposition of redirect. | | | bits 21-22 | OutputRegField | 0b01 | OutputToOFPortRegMark | Output packet to an OVS port. | @@ -156,6 +156,7 @@ to the registers we use. | | bit 26 | | 0b1 | RemoteEndpointRegMark | Packet is destined for a Service selecting a remote non-hostNetwork Endpoint. | | | bit 27 | | 0b1 | FromExternalRegMark | Packet is from Antrea gateway, but its source IP is not the gateway IP. | | | bit 28 | | 0b1 | FromLocalRegMark | Packet is from a local Pod or the Node. | +| | bit 29 | | 0b1 | LoadBalancerSourceRangesRegMark | Packet's source is included in the LoadBalancerSourceRanges of the LoadBalancer Service. | | NXM_NX_REG5 | bits 0-31 | TFEgressConjIDField | | | Egress conjunction ID hit by TraceFlow packet. | | NXM_NX_REG6 | bits 0-31 | TFIngressConjIDField | | | Ingress conjunction ID hit by TraceFlow packet. | | NXM_NX_REG7 | bits 0-31 | ServiceGroupIDField | | | GroupID corresponding to the Service. 
| @@ -254,7 +255,7 @@ spec: ## Kubernetes Service Implementation Like K8s NetworkPolicy, several tables of the pipeline are dedicated to [Kubernetes -Service](https://kubernetes.io/docs/concepts/services-networking/service/) implementation (tables [NodePortMark], +Service](https://kubernetes.io/docs/concepts/services-networking/service/) implementation (tables [ServiceMark], [SessionAffinity], [ServiceLB], and [EndpointDNAT]). By enabling `proxyAll`, ClusterIP, NodePort, LoadBalancer, and ExternalIP are all handled by Antrea Proxy. Otherwise, @@ -408,6 +409,34 @@ status: - ip: 192.168.77.151 ``` +### LoadBalancer with LoadBalancerSourceRanges + +A sample LoadBalancer Service, with ingress IP `192.168.77.152` assigned by an ingress controller, configured +`loadBalancerSourceRanges` to a CIDR list and `externalTrafficPolicy` to `Local`. + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: sample-loadbalancer-source-ranges +spec: + selector: + app: web + ports: + - protocol: TCP + port: 80 + targetPort: 80 + type: LoadBalancer + externalTrafficPolicy: Local + loadBalancerSourceRanges: + - "192.168.77.0/24" + - "192.168.78.0/24" +status: + loadBalancer: + ingress: + - ip: 192.168.77.152 +``` + ## Antrea-native NetworkPolicy Implementation In addition to the tables created for K8s NetworkPolicy, Antrea creates additional dedicated tables to support @@ -863,7 +892,7 @@ specific cases: 1. Dropping invalid packets reported by conntrack. 2. Forwarding tracked packets from all connections to table [AntreaPolicyEgressRule] directly, bypassing the tables - like [PreRoutingClassifier], [NodePortMark], [SessionAffinity], [ServiceLB], and [EndpointDNAT] for Service Endpoint + like [PreRoutingClassifier], [ServiceMark], [SessionAffinity], [ServiceLB], and [EndpointDNAT] for Service Endpoint selection. 3. Forwarding packets from new connections to table [PreRoutingClassifier] to start Service Endpoint selection since Service connections are not identified at this stage. 
@@ -893,35 +922,42 @@ Flow 4 is the table-miss flow for case 3, matching packets from all new connecti ### PreRoutingClassifier This table handles the first packet from uncommitted Service connections before Service Endpoint selection. It -sequentially resubmits the packets to tables [NodePortMark] and [SessionAffinity] to do some pre-processing, including +sequentially resubmits the packets to tables [ServiceMark] and [SessionAffinity] to do some pre-processing, including the loading of specific reg marks. Subsequently, it forwards the packets to table [ServiceLB] to perform Service Endpoint selection. If you dump the flows of this table, you may see the following: ```text -1. table=PreRoutingClassifier, priority=200,ip actions=resubmit(,NodePortMark),resubmit(,SessionAffinity),resubmit(,ServiceLB) -2. table=PreRoutingClassifier, priority=0 actions=goto_table:NodePortMark +1. table=PreRoutingClassifier, priority=200,ip actions=resubmit(,ServiceMark),resubmit(,SessionAffinity),resubmit(,ServiceLB) +2. table=PreRoutingClassifier, priority=0 actions=goto_table:ServiceMark ``` -Flow 1 sequentially resubmits packets to tables [NodePortMark], [SessionAffinity], and [ServiceLB]. Note that packets -are ultimately forwarded to table [ServiceLB]. In tables [NodePortMark] and [SessionAffinity], only reg marks are loaded. +Flow 1 sequentially resubmits packets to tables [ServiceMark], [SessionAffinity], and [ServiceLB]. Note that packets +are ultimately forwarded to table [ServiceLB]. In tables [ServiceMark] and [SessionAffinity], only reg marks are loaded. Flow 2 is the table-miss flow that should remain unused. -### NodePortMark +### ServiceMark + +This table is designed to mark Service traffic. It addresses specific cases: -This table is designed to potentially mark packets destined for NodePort Services. It is only created when `proxyAll` is -enabled. +1. Packets potentially destined for NodePort Services. +2. 
Packets destined for LoadBalancer Services configured with `loadBalancerSourceRanges`. If you dump the flows of this table, you may see the following: ```text -1. table=NodePortMark, priority=200,ip,nw_dst=192.168.77.102 actions=set_field:0x80000/0x80000->reg4 -2. table=NodePortMark, priority=200,ip,nw_dst=169.254.0.252 actions=set_field:0x80000/0x80000->reg4 -3. table=NodePortMark, priority=0 actions=goto_table:SessionAffinity +1. table=ServiceMark, priority=200,ip,nw_dst=192.168.77.102 actions=set_field:0x80000/0x80000->reg4 +2. table=ServiceMark, priority=200,ip,nw_dst=169.254.0.252 actions=set_field:0x80000/0x80000->reg4 +3. table=ServiceMark, priority=200,tcp,nw_src=192.168.77.0/24,nw_dst=192.168.77.152,tp_dst=80 actions=set_field:0x20000000/0x20000000->reg4 +4. table=ServiceMark, priority=200,tcp,nw_src=192.168.78.0/24,nw_dst=192.168.77.152,tp_dst=80 actions=set_field:0x20000000/0x20000000->reg4 +5. table=ServiceMark, priority=190,tcp,nw_dst=192.168.77.152,tp_dst=80 actions=set_field:0x4000/0x4000->reg0 +6. table=ServiceMark, priority=0 actions=goto_table:SessionAffinity ``` +Flows 1-2 are designed for case 1. + Flow 1 matches packets destined for the local Node from local Pods. `NodePortRegMark` is loaded, indicating that the packets are potentially destined for NodePort Services. We assume only one valid IP address, `192.168.77.102` (the Node's transport IP), can serve as the host IP address for NodePort based on the option `antreaProxy.nodePortAddresses`. @@ -931,10 +967,23 @@ IP address. Flow 2 match packets destined for the *Virtual NodePort DNAT IP*. Packets destined for NodePort Services from the local Node or the external network is DNAT'd to the *Virtual NodePort DNAT IP* by iptables before entering the pipeline. -Flow 3 is the table-miss flow. +Flows 3-5 are designed for case 2. 
+ +Flows 3-4 are used to match the packets destined for the sample [LoadBalancer with LoadBalancerSourceRanges], and these +packets are also from the CIDRs within the `loadBalancerSourceRanges` of the Services. `LoadBalancerSourceRangesRegMark`, +which will be consumed in table [ServiceLB], is loaded to identify that the corresponding connections should get +load-balanced in table [ServiceLB]. + +Flow 5, having lower priority than that of flows 3-4, is also used to match the packets destined for the sample +[LoadBalancer with LoadBalancerSourceRanges], but these packets are not from any CIDRs within the +`loadBalancerSourceRanges` of the Services. `SvcRejectRegMark`, which will be consumed in table [EndpointDNAT], is +loaded to identify that the corresponding connections should be rejected. Since `LoadBalancerSourceRangesRegMark` is not +loaded for the packets, the corresponding connections will not get load-balanced in table [ServiceLB]. + +Flow 6 is the table-miss flow. Note that packets of NodePort Services have not been identified in this table by matching destination IP address. The -identification of NodePort Services will be done finally in table [ServiceLB] by matching `NodePortRegMark` and the +identification of NodePort Services will be done finally in table [ServiceLB] by matching `NodePortRegMark` and the specific destination port of a NodePort. ### SessionAffinity @@ -978,8 +1027,10 @@ This table is used to implement Service Endpoint selection. It addresses specifi 3. LoadBalancer, as demonstrated in the example [LoadBalancer]. 4. Service configured with external IPs, as demonstrated in the example [Service with ExternalIP]. 5. Service configured with session affinity, as demonstrated in the example [Service with session affinity]. -6. Service configured with externalTrafficPolicy to `Local`, as demonstrated in the example [Service with +6. 
Service configured with `externalTrafficPolicy` to `Local`, as demonstrated in the example [Service with ExternalTrafficPolicy Local]. +7. LoadBalancer configured with `loadBalancerSourceRanges`, as demonstrated in the example [LoadBalancer with + LoadBalancerSourceRanges]. If you dump the flows of this table, you may see the following: @@ -995,7 +1046,9 @@ If you dump the flows of this table, you may see the following: set_field:0x20000/0x70000->reg4,goto_table:EndpointDNAT 8. table=ServiceLB, priority=210,tcp,reg4=0x10010000/0x10070000,nw_dst=192.168.77.151,tp_dst=80 actions=set_field:0x200/0x200->reg0,set_field:0x20000/0x70000->reg4,set_field:0x11->reg7,group:17 9. table=ServiceLB, priority=200,tcp,reg4=0x10000/0x70000,nw_dst=192.168.77.151,tp_dst=80 actions=set_field:0x200/0x200->reg0,set_field:0x20000/0x70000->reg4,set_field:0x12->reg7,group:18 -10. table=ServiceLB, priority=0 actions=goto_table:EndpointDNAT +10. table=ServiceLB, priority=210,tcp,reg4=0x30010000/0x30070000,nw_dst=192.168.77.152,tp_dst=80 actions=set_field:0x200/0x200->reg0,set_field:0x20000/0x70000->reg4,set_field:0x13->reg7,group:19 +11. table=ServiceLB, priority=200,tcp,reg4=0x20010000/0x20070000,nw_dst=192.168.77.152,tp_dst=80 actions=set_field:0x200/0x200->reg0,set_field:0x20000/0x70000->reg4,set_field:0x14->reg7,group:20 +12. table=ServiceLB, priority=0 actions=goto_table:EndpointDNAT ``` Flow 1 and flow 2 are designed for case 1, matching the first packet of connections destined for the sample [ClusterIP @@ -1007,7 +1060,7 @@ loaded, indicating that the source and destination MAC addresses of the packets Service Endpoint selection is not completed yet, as it will be done in the target OVS group. Flow 3 is for case 2, matching the first packet of connections destined for the sample [NodePort]. 
This is achieved by -matching `EpToSelectRegMark` loaded in table [SessionAffinity], `NodePortRegMark` loaded in table [NodePortMark], and +matching `EpToSelectRegMark` loaded in table [SessionAffinity], `NodePortRegMark` loaded in table [ServiceMark], and NodePort port. Similar to flows 1-2, `RewriteMACRegMark` and `EpSelectedRegMark` are also loaded. Flow 4 is for case 3, processing the first packet of connections destined for the ingress IP of the sample @@ -1035,7 +1088,12 @@ Nodes, even though `externalTrafficPolicy` is set to `Local` for the Service. Du flow 9 exclusively matches packets sourced from the external network, resembling the pattern of flow 1. The target of flow 9 is an OVS group that has only the local Endpoints since `externalTrafficPolicy` of the Service is `Local`. -Flow 10 is the table-miss flow. +Flows 10-11 are for case 7. They are similar to flows 8-9 but with a slight divergence. These flows include an +additional reg mark `LoadBalancerSourceRangesRegMark`, which is loaded in table [ServiceMark]. This reg mark is used to +match the first packet destined for the sample [LoadBalancer with LoadBalancerSourceRanges], ensuring it originates from +a CIDR listed in the Service `loadBalancerSourceRanges`. + +Flow 12 is the table-miss flow. As mentioned above, the Service Endpoint selection is performed within OVS groups. 3 typical OVS groups are listed below: @@ -1051,7 +1109,7 @@ As mentioned above, the Service Endpoint selection is performed within OVS group ``` The first group with `group_id` 9 is the destination of packets matched by flow 1, designed for a Service without -Endpoints. The group only has a single bucket where `SvcNoEpRegMark` which will be used in table [EndpointDNAT] is +Endpoints. The group only has a single bucket where `SvcRejectRegMark` which will be used in table [EndpointDNAT] is loaded, indicating that the Service has no Endpoint, and then packets are forwarded to table [EndpointDNAT]. 
The second group with `group_id` 10 is the destination of packets matched by flow 2, designed for a Service with @@ -1081,7 +1139,7 @@ If you dump the flows of this table, you may see the following:: ``` Flow 1 is designed for Services without Endpoints. It identifies the first packet of connections destined for such Service -by matching `SvcNoEpRegMark`. Subsequently, the packet is forwarded to the OpenFlow controller (Antrea Agent). For TCP +by matching `SvcRejectRegMark`. Subsequently, the packet is forwarded to the OpenFlow controller (Antrea Agent). For TCP Service traffic, the controller will send a TCP RST, and for all other cases the controller will send an ICMP Destination Unreachable message. @@ -1312,7 +1370,7 @@ the following cases when Antrea Proxy is not enabled: to complete the DNAT processes, e.g., kube-proxy. The destination MAC of the packets is rewritten in the table to avoid it is forwarded to the original client Pod by mistake. - When hairpin is involved, i.e. connections between 2 local Pods, for which NAT is performed. One example is a - Pod accessing a NodePort Service for which externalTrafficPolicy is set to `Local` using the local Node's IP address, + Pod accessing a NodePort Service for which `externalTrafficPolicy` is set to `Local` using the local Node's IP address, as there will be no SNAT for such traffic. Another example could be hostPort support, depending on how the feature is implemented. @@ -1881,8 +1939,9 @@ Flow 8 is the table-miss flow for case 7. 
It drops packets that do not match any [L3DecTTL]: #l3decttl [L3Forwarding]: #l3forwarding [LoadBalancer]: #loadbalancer +[LoadBalancer with LoadBalancerSourceRanges]: #loadbalancer-with-loadbalancersourceranges [NodePort]: #nodeport -[NodePortMark]: #nodeportmark +[ServiceMark]: #servicemark [OVS Registers]: #ovs-registers [Output]: #output [PreRoutingClassifier]: #preroutingclassifier diff --git a/pkg/agent/openflow/client.go b/pkg/agent/openflow/client.go index e6d3ec5ef87..20b48a438bf 100644 --- a/pkg/agent/openflow/client.go +++ b/pkg/agent/openflow/client.go @@ -799,6 +799,9 @@ func (c *client) InstallServiceFlows(config *types.ServiceConfig) error { if config.IsDSR { flows = append(flows, c.featureService.dsrServiceMarkFlow(config)) } + if len(config.LoadBalancerSourceRanges) != 0 { + flows = append(flows, c.featureService.loadBalancerSourceRangesMarkFlows(config)...) + } cacheKey := generateServicePortFlowCacheKey(config.ServiceIP, config.ServicePort, config.Protocol) return c.addFlows(c.featureService.cachedFlows, cacheKey, flows) } diff --git a/pkg/agent/openflow/client_test.go b/pkg/agent/openflow/client_test.go index 658193b1220..b126cf70dbb 100644 --- a/pkg/agent/openflow/client_test.go +++ b/pkg/agent/openflow/client_test.go @@ -1017,8 +1017,8 @@ func Test_client_GetPodFlowKeys(t *testing.T) { "table=1,priority=200,arp,in_port=11,arp_spa=10.10.0.11,arp_sha=00:00:10:10:00:11", "table=3,priority=190,in_port=11", "table=4,priority=200,ip,in_port=11,dl_src=00:00:10:10:00:11,nw_src=10.10.0.11", - "table=17,priority=200,ip,reg0=0x200/0x200,nw_dst=10.10.0.11", - "table=22,priority=200,dl_dst=00:00:10:10:00:11", + "table=18,priority=200,ip,reg0=0x200/0x200,nw_dst=10.10.0.11", + "table=23,priority=200,dl_dst=00:00:10:10:00:11", } assert.ElementsMatch(t, expectedFlowKeys, flowKeys) } @@ -1254,17 +1254,18 @@ func Test_client_InstallServiceFlows(t *testing.T) { port := uint16(80) testCases := []struct { - name string - trafficPolicyLocal bool - protocol 
binding.Protocol - svcIP net.IP - affinityTimeout uint16 - isExternal bool - isNodePort bool - isNested bool - isDSR bool - enableMulticluster bool - expectedFlows []string + name string + trafficPolicyLocal bool + protocol binding.Protocol + svcIP net.IP + affinityTimeout uint16 + isExternal bool + isNodePort bool + isNested bool + isDSR bool + enableMulticluster bool + loadBalancerSourceRanges []string + expectedFlows []string }{ { name: "Service ClusterIP", @@ -1449,6 +1450,38 @@ func Test_client_InstallServiceFlows(t *testing.T) { "cookie=0x1030000000064, table=DSRServiceMark, priority=200,tcp6,reg4=0xc000000/0xe000000,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=learn(table=SessionAffinity,idle_timeout=160,fin_idle_timeout=5,priority=210,delete_learned,cookie=0x1030000000064,eth_type=0x86dd,nw_proto=0x6,OXM_OF_TCP_SRC[],OXM_OF_TCP_DST[],NXM_NX_IPV6_SRC[],NXM_NX_IPV6_DST[],load:NXM_NX_REG4[0..15]->NXM_NX_REG4[0..15],load:0x2->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG4[25],load:NXM_NX_XXREG3[]->NXM_NX_XXREG3[]),set_field:0x2000000/0x2000000->reg4,goto_table:EndpointDNAT", }, }, + { + name: "Service LoadBalancer,LoadBalancerSourceRanges,SessionAffinity,Short-circuiting", + protocol: binding.ProtocolSCTP, + svcIP: svcIPv4, + affinityTimeout: uint16(100), + isExternal: true, + trafficPolicyLocal: true, + loadBalancerSourceRanges: []string{"192.168.1.0/24", "192.168.2.0/24"}, + expectedFlows: []string{ + "cookie=0x1030000000000, table=ServiceMark, priority=200,sctp,nw_src=192.168.1.0/24,nw_dst=10.96.0.100,tp_dst=80 actions=set_field:0x20000000/0x20000000->reg4", + "cookie=0x1030000000000, table=ServiceMark, priority=200,sctp,nw_src=192.168.2.0/24,nw_dst=10.96.0.100,tp_dst=80 actions=set_field:0x20000000/0x20000000->reg4", + "cookie=0x1030000000000, table=ServiceMark, priority=190,sctp,nw_dst=10.96.0.100,tp_dst=80 actions=set_field:0x4000/0x4000->reg0", + "cookie=0x1030000000000, table=ServiceLB, priority=210,sctp,reg4=0x30010000/0x30070000,nw_dst=10.96.0.100,tp_dst=80 
actions=set_field:0x200/0x200->reg0,set_field:0x30000/0x70000->reg4,set_field:0x200000/0x200000->reg4,set_field:0x64->reg7,group:100", + "cookie=0x1030000000000, table=ServiceLB, priority=200,sctp,reg4=0x20010000/0x20070000,nw_dst=10.96.0.100,tp_dst=80 actions=set_field:0x200/0x200->reg0,set_field:0x30000/0x70000->reg4,set_field:0x200000/0x200000->reg4,set_field:0x65->reg7,group:101", + "cookie=0x1030000000065, table=ServiceLB, priority=190,sctp,reg4=0x30000/0x70000,nw_dst=10.96.0.100,tp_dst=80 actions=learn(table=SessionAffinity,hard_timeout=100,priority=200,delete_learned,cookie=0x1030000000065,eth_type=0x800,nw_proto=0x84,OXM_OF_SCTP_DST[],NXM_OF_IP_DST[],NXM_OF_IP_SRC[],load:NXM_NX_REG4[0..15]->NXM_NX_REG4[0..15],load:NXM_NX_REG4[26]->NXM_NX_REG4[26],load:NXM_NX_REG3[]->NXM_NX_REG3[],load:0x2->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG0[9],load:0x1->NXM_NX_REG4[21]),set_field:0x20000/0x70000->reg4,goto_table:EndpointDNAT", + }, + }, + { + name: "Service LoadBalancer,LoadBalancerSourceRanges,IPv6,SessionAffinity", + protocol: binding.ProtocolSCTPv6, + svcIP: svcIPv6, + affinityTimeout: uint16(100), + isExternal: true, + loadBalancerSourceRanges: []string{"fec0:192:168:1::/64", "fec0:192:168:2::/64"}, + expectedFlows: []string{ + "cookie=0x1030000000000, table=ServiceMark, priority=200,sctp6,ipv6_src=fec0:192:168:1::/64,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=set_field:0x20000000/0x20000000->reg4", + "cookie=0x1030000000000, table=ServiceMark, priority=200,sctp6,ipv6_src=fec0:192:168:2::/64,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=set_field:0x20000000/0x20000000->reg4", + "cookie=0x1030000000000, table=ServiceMark, priority=190,sctp6,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=set_field:0x4000/0x4000->reg0", + "cookie=0x1030000000000, table=ServiceLB, priority=200,sctp6,reg4=0x20010000/0x20070000,ipv6_dst=fec0:10:96::100,tp_dst=80 
actions=set_field:0x200/0x200->reg0,set_field:0x30000/0x70000->reg4,set_field:0x200000/0x200000->reg4,set_field:0x64->reg7,group:100", + "cookie=0x1030000000064, table=ServiceLB, priority=190,sctp6,reg4=0x30000/0x70000,ipv6_dst=fec0:10:96::100,tp_dst=80 actions=learn(table=SessionAffinity,hard_timeout=100,priority=200,delete_learned,cookie=0x1030000000064,eth_type=0x86dd,nw_proto=0x84,OXM_OF_SCTP_DST[],NXM_NX_IPV6_DST[],NXM_NX_IPV6_SRC[],load:NXM_NX_REG4[0..15]->NXM_NX_REG4[0..15],load:NXM_NX_REG4[26]->NXM_NX_REG4[26],load:NXM_NX_XXREG3[]->NXM_NX_XXREG3[],load:0x2->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG0[9],load:0x1->NXM_NX_REG4[21]),set_field:0x20000/0x70000->reg4,goto_table:EndpointDNAT", + }, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { @@ -1471,17 +1504,18 @@ func Test_client_InstallServiceFlows(t *testing.T) { cacheKey := generateServicePortFlowCacheKey(tc.svcIP, port, tc.protocol) assert.NoError(t, fc.InstallServiceFlows(&types.ServiceConfig{ - ServiceIP: tc.svcIP, - ServicePort: port, - Protocol: tc.protocol, - TrafficPolicyLocal: tc.trafficPolicyLocal, - LocalGroupID: localGroupID, - ClusterGroupID: clusterGroupID, - AffinityTimeout: tc.affinityTimeout, - IsExternal: tc.isExternal, - IsNodePort: tc.isNodePort, - IsNested: tc.isNested, - IsDSR: tc.isDSR, + ServiceIP: tc.svcIP, + ServicePort: port, + Protocol: tc.protocol, + TrafficPolicyLocal: tc.trafficPolicyLocal, + LocalGroupID: localGroupID, + ClusterGroupID: clusterGroupID, + AffinityTimeout: tc.affinityTimeout, + IsExternal: tc.isExternal, + IsNodePort: tc.isNodePort, + IsNested: tc.isNested, + IsDSR: tc.isDSR, + LoadBalancerSourceRanges: tc.loadBalancerSourceRanges, })) fCacheI, ok := fc.featureService.cachedFlows.Load(cacheKey) require.True(t, ok) @@ -1527,11 +1561,11 @@ func Test_client_GetServiceFlowKeys(t *testing.T) { assert.NoError(t, fc.InstallEndpointFlows(bindingProtocol, endpoints)) flowKeys := fc.GetServiceFlowKeys(svcIP, svcPort, bindingProtocol, endpoints) 
expectedFlowKeys := []string{ - "table=11,priority=200,tcp,reg4=0x10000/0x70000,nw_dst=10.96.0.224,tp_dst=80", - "table=11,priority=190,tcp,reg4=0x30000/0x70000,nw_dst=10.96.0.224,tp_dst=80", - "table=12,priority=200,tcp,reg3=0xa0a000b,reg4=0x20050/0x7ffff", - "table=12,priority=200,tcp,reg3=0xa0a000c,reg4=0x20050/0x7ffff", - "table=20,priority=190,ct_state=+new+trk,ip,nw_src=10.10.0.12,nw_dst=10.10.0.12", + "table=12,priority=200,tcp,reg4=0x10000/0x70000,nw_dst=10.96.0.224,tp_dst=80", + "table=12,priority=190,tcp,reg4=0x30000/0x70000,nw_dst=10.96.0.224,tp_dst=80", + "table=13,priority=200,tcp,reg3=0xa0a000b,reg4=0x20050/0x7ffff", + "table=13,priority=200,tcp,reg3=0xa0a000c,reg4=0x20050/0x7ffff", + "table=21,priority=190,ct_state=+new+trk,ip,nw_src=10.10.0.12,nw_dst=10.10.0.12", } assert.ElementsMatch(t, expectedFlowKeys, flowKeys) } @@ -2787,8 +2821,8 @@ func Test_client_ReplayFlows(t *testing.T) { "cookie=0x1020000000000, table=IngressMetric, priority=200,reg0=0x400/0x400,reg3=0xf actions=drop", ) replayedFlows = append(replayedFlows, - "cookie=0x1020000000000, table=IngressRule, priority=200,conj_id=15 actions=set_field:0xf->reg3,set_field:0x400/0x400->reg0,set_field:0x800/0x1800->reg0,set_field:0x2000000/0xfe000000->reg0,set_field:0x1b/0xff->reg2,group:4", - "cookie=0x1020000000000, table=IngressDefaultRule, priority=200,reg1=0x64 actions=set_field:0x800/0x1800->reg0,set_field:0x2000000/0xfe000000->reg0,set_field:0x400000/0x600000->reg0,set_field:0x1c/0xff->reg2,goto_table:Output", + "cookie=0x1020000000000, table=IngressRule, priority=200,conj_id=15 actions=set_field:0xf->reg3,set_field:0x400/0x400->reg0,set_field:0x800/0x1800->reg0,set_field:0x2000000/0xfe000000->reg0,set_field:0x1c/0xff->reg2,group:4", + "cookie=0x1020000000000, table=IngressDefaultRule, priority=200,reg1=0x64 actions=set_field:0x800/0x1800->reg0,set_field:0x2000000/0xfe000000->reg0,set_field:0x400000/0x600000->reg0,set_field:0x1d/0xff->reg2,goto_table:Output", ) // Feature Pod connectivity 
replays flows. diff --git a/pkg/agent/openflow/fields.go b/pkg/agent/openflow/fields.go index 78f0845a143..c54985abc70 100644 --- a/pkg/agent/openflow/fields.go +++ b/pkg/agent/openflow/fields.go @@ -72,8 +72,8 @@ var ( DispositionPassRegMark = binding.NewRegMark(APDispositionField, DispositionPass) // reg0[13]: Mark to indicate the packet is a generated reject response packet-out. GeneratedRejectPacketOutRegMark = binding.NewOneBitRegMark(0, 13) - // reg0[14]: Mark to indicate a Service without any Endpoints (used by Proxy) - SvcNoEpRegMark = binding.NewOneBitRegMark(0, 14) + // reg0[14]: Mark to indicate a Service connection should be rejected. + SvcRejectRegMark = binding.NewOneBitRegMark(0, 14) // reg0[19]: Mark to indicate remote SNAT for Egress. RemoteSNATRegMark = binding.NewOneBitRegMark(0, 19) // reg0[20]: Field to indicate redirect action of layer 7 NetworkPolicy. @@ -149,6 +149,8 @@ var ( FromExternalRegMark = binding.NewOneBitRegMark(4, 27) // reg4[28]: Mark to indicate that whether the traffic's source is a local Pod or the Node. FromLocalRegMark = binding.NewOneBitRegMark(4, 28) + // reg4[29]: Mark to indicate whether the LoadBalancer Service traffic's source is included in the Service loadBalancerSourceRanges. + LoadBalancerSourceRangesRegMark = binding.NewOneBitRegMark(4, 29) // reg5(NXM_NX_REG5) // Field to cache the Egress conjunction ID hit by TraceFlow packet. 
diff --git a/pkg/agent/openflow/framework.go b/pkg/agent/openflow/framework.go index d8c353ee2a2..ab630527b45 100644 --- a/pkg/agent/openflow/framework.go +++ b/pkg/agent/openflow/framework.go @@ -254,6 +254,7 @@ func (f *featureService) getRequiredTables() []*Table { tables := []*Table{ UnSNATTable, PreRoutingClassifierTable, + ServiceMarkTable, SessionAffinityTable, ServiceLBTable, EndpointDNATTable, @@ -263,9 +264,6 @@ func (f *featureService) getRequiredTables() []*Table { ConntrackCommitTable, OutputTable, } - if f.proxyAll { - tables = append(tables, NodePortMarkTable) - } if f.enableDSR { tables = append(tables, DSRServiceMarkTable) } diff --git a/pkg/agent/openflow/framework_test.go b/pkg/agent/openflow/framework_test.go index 2c69c85ffa5..f26032df75c 100644 --- a/pkg/agent/openflow/framework_test.go +++ b/pkg/agent/openflow/framework_test.go @@ -407,7 +407,7 @@ func TestBuildPipeline(t *testing.T) { }, }, { - name: "K8s Node, IPv4 only, with proxyAll enabled", + name: "K8s Node, IPv4 only, with AntreaProxy enabled", ipStack: ipv4Only, features: []feature{ newTestFeaturePodConnectivity(ipStackMap[ipv4Only]), @@ -426,7 +426,7 @@ func TestBuildPipeline(t *testing.T) { ConntrackTable, ConntrackStateTable, PreRoutingClassifierTable, - NodePortMarkTable, + ServiceMarkTable, SessionAffinityTable, ServiceLBTable, EndpointDNATTable, diff --git a/pkg/agent/openflow/pipeline.go b/pkg/agent/openflow/pipeline.go index 601e910f186..0c20be376d1 100644 --- a/pkg/agent/openflow/pipeline.go +++ b/pkg/agent/openflow/pipeline.go @@ -139,7 +139,7 @@ var ( // Tables in stagePreRouting: // When proxy is enabled. 
PreRoutingClassifierTable = newTable("PreRoutingClassifier", stagePreRouting, pipelineIP) - NodePortMarkTable = newTable("NodePortMark", stagePreRouting, pipelineIP) + ServiceMarkTable = newTable("ServiceMark", stagePreRouting, pipelineIP) SessionAffinityTable = newTable("SessionAffinity", stagePreRouting, pipelineIP) ServiceLBTable = newTable("ServiceLB", stagePreRouting, pipelineIP) DSRServiceMarkTable = newTable("DSRServiceMark", stagePreRouting, pipelineIP) @@ -2288,7 +2288,7 @@ func (f *featureService) nodePortMarkFlows() []binding.Flow { continue } flows = append(flows, - NodePortMarkTable.ofTable.BuildFlow(priorityNormal). + ServiceMarkTable.ofTable.BuildFlow(priorityNormal). Cookie(cookieID). MatchProtocol(ipProtocol). MatchDstIP(nodePortAddresses[i]). @@ -2298,7 +2298,7 @@ func (f *featureService) nodePortMarkFlows() []binding.Flow { // This generates the flow for the virtual NodePort DNAT IP. The flow is used to mark the first packet of NodePort // connection sourced from the Antrea gateway (the connection is performed DNAT with the virtual IP in host netns). flows = append(flows, - NodePortMarkTable.ofTable.BuildFlow(priorityNormal). + ServiceMarkTable.ofTable.BuildFlow(priorityNormal). Cookie(cookieID). MatchProtocol(ipProtocol). MatchDstIP(f.virtualNodePortDNATIPs[ipProtocol]). @@ -2408,11 +2408,19 @@ func (f *featureService) serviceLBFlows(config *types.ServiceConfig) []binding.F Action().Group(groupID).Done() } flows := []binding.Flow{ - buildFlow(priorityNormal, config.TrafficPolicyGroupID(), nil), + buildFlow(priorityNormal, config.TrafficPolicyGroupID(), func(b binding.FlowBuilder) binding.FlowBuilder { + if len(config.LoadBalancerSourceRanges) != 0 { + b = b.MatchRegMark(LoadBalancerSourceRangesRegMark) + } + return b + }), } if config.IsExternal && config.TrafficPolicyLocal { // For short-circuiting flow, an extra match condition matching packet from a local Pod or the Node is added. 
flows = append(flows, buildFlow(priorityHigh, config.ClusterGroupID, func(b binding.FlowBuilder) binding.FlowBuilder { + if len(config.LoadBalancerSourceRanges) != 0 { + b = b.MatchRegMark(LoadBalancerSourceRangesRegMark) + } return b.MatchRegMark(FromLocalRegMark) })) } @@ -2551,7 +2559,7 @@ func (f *featureService) serviceEndpointGroup(groupID binding.GroupIDType, withS if len(endpoints) == 0 { return group.Bucket().Weight(100). - LoadRegMark(SvcNoEpRegMark). + LoadRegMark(SvcRejectRegMark). ResubmitToTable(EndpointDNATTable.GetID()). Done() } @@ -3013,7 +3021,7 @@ func (f *featureService) preRoutingClassifierFlows() []binding.Flow { targetTables := []uint8{SessionAffinityTable.GetID(), ServiceLBTable.GetID()} if f.proxyAll { - targetTables = append([]uint8{NodePortMarkTable.GetID()}, targetTables...) + targetTables = append([]uint8{ServiceMarkTable.GetID()}, targetTables...) } for _, ipProtocol := range f.ipProtocols { flows = append(flows, @@ -3106,6 +3114,34 @@ func (f *featureService) gatewaySNATFlows() []binding.Flow { return flows } +func (f *featureService) loadBalancerSourceRangesMarkFlows(config *types.ServiceConfig) []binding.Flow { + cookieID := f.cookieAllocator.Request(f.category).Raw() + protocol := config.Protocol + ingressIP := config.ServiceIP + port := config.ServicePort + var flows []binding.Flow + for _, srcRange := range config.LoadBalancerSourceRanges { + _, srcIPNet, _ := net.ParseCIDR(srcRange) + flows = append(flows, ServiceMarkTable.ofTable.BuildFlow(priorityNormal). + Cookie(cookieID). + MatchProtocol(protocol). + MatchSrcIPNet(*srcIPNet). + MatchDstIP(ingressIP). + MatchDstPort(port, nil). + Action().LoadRegMark(LoadBalancerSourceRangesRegMark). + Done(), + ) + } + flows = append(flows, ServiceMarkTable.ofTable.BuildFlow(priorityLow). + Cookie(cookieID). + MatchProtocol(protocol). + MatchDstIP(ingressIP). + MatchDstPort(port, nil). + Action().LoadRegMark(SvcRejectRegMark). 
+ Done()) + return flows +} + func getCachedFlowMessages(cache *flowCategoryCache) []*openflow15.FlowMod { var flows []*openflow15.FlowMod cache.Range(func(key, value interface{}) bool { diff --git a/pkg/agent/openflow/pipeline_test.go b/pkg/agent/openflow/pipeline_test.go index 451abe7ccc1..e7165e851bb 100644 --- a/pkg/agent/openflow/pipeline_test.go +++ b/pkg/agent/openflow/pipeline_test.go @@ -67,7 +67,8 @@ func pipelineDefaultFlows(egressTrafficShapingEnabled, externalNodeEnabled, isEn "cookie=0x1000000000000, table=UnSNAT, priority=0 actions=goto_table:ConntrackZone", "cookie=0x1000000000000, table=ConntrackZone, priority=0 actions=goto_table:ConntrackState", "cookie=0x1000000000000, table=ConntrackState, priority=0 actions=goto_table:PreRoutingClassifier", - "cookie=0x1000000000000, table=PreRoutingClassifier, priority=0 actions=goto_table:SessionAffinity", + "cookie=0x1000000000000, table=PreRoutingClassifier, priority=0 actions=goto_table:ServiceMark", + "cookie=0x1000000000000, table=ServiceMark, priority=0 actions=goto_table:SessionAffinity", "cookie=0x1000000000000, table=SessionAffinity, priority=0 actions=goto_table:ServiceLB", "cookie=0x1000000000000, table=ServiceLB, priority=0 actions=goto_table:EndpointDNAT", "cookie=0x1000000000000, table=EndpointDNAT, priority=0 actions=goto_table:AntreaPolicyEgressRule", @@ -136,7 +137,8 @@ func pipelineDefaultFlows(egressTrafficShapingEnabled, externalNodeEnabled, isEn "cookie=0x1000000000000, table=UnSNAT, priority=0 actions=goto_table:ConntrackZone", "cookie=0x1000000000000, table=ConntrackZone, priority=0 actions=goto_table:ConntrackState", "cookie=0x1000000000000, table=ConntrackState, priority=0 actions=goto_table:PreRoutingClassifier", - "cookie=0x1000000000000, table=PreRoutingClassifier, priority=0 actions=goto_table:SessionAffinity", + "cookie=0x1000000000000, table=PreRoutingClassifier, priority=0 actions=goto_table:ServiceMark", + "cookie=0x1000000000000, table=ServiceMark, priority=0 
actions=goto_table:SessionAffinity", "cookie=0x1000000000000, table=SessionAffinity, priority=0 actions=goto_table:ServiceLB", "cookie=0x1000000000000, table=ServiceLB, priority=0 actions=goto_table:EndpointDNAT", "cookie=0x1000000000000, table=EndpointDNAT, priority=0 actions=goto_table:AntreaPolicyEgressRule", diff --git a/pkg/agent/openflow/service.go b/pkg/agent/openflow/service.go index 2d8a4c0453b..0a30301c0cc 100644 --- a/pkg/agent/openflow/service.go +++ b/pkg/agent/openflow/service.go @@ -135,7 +135,7 @@ func newFeatureService( func (f *featureService) serviceNoEndpointFlow() binding.Flow { return EndpointDNATTable.ofTable.BuildFlow(priorityNormal). Cookie(f.cookieAllocator.Request(f.category).Raw()). - MatchRegMark(SvcNoEpRegMark). + MatchRegMark(SvcRejectRegMark). Action().SendToController([]byte{uint8(PacketInCategorySvcReject)}, false). Done() } diff --git a/pkg/agent/openflow/service_test.go b/pkg/agent/openflow/service_test.go index f8eb226db39..df404bc2df4 100644 --- a/pkg/agent/openflow/service_test.go +++ b/pkg/agent/openflow/service_test.go @@ -49,9 +49,9 @@ func serviceInitFlows(proxyEnabled, isIPv4, proxyAllEnabled, dsrEnabled bool) [] } if proxyAllEnabled { flows = append(flows, - "cookie=0x1030000000000, table=PreRoutingClassifier, priority=200,ip actions=resubmit:NodePortMark,resubmit:SessionAffinity,resubmit:ServiceLB", - "cookie=0x1030000000000, table=NodePortMark, priority=200,ip,nw_dst=192.168.77.100 actions=set_field:0x80000/0x80000->reg4", - "cookie=0x1030000000000, table=NodePortMark, priority=200,ip,nw_dst=169.254.0.252 actions=set_field:0x80000/0x80000->reg4", + "cookie=0x1030000000000, table=PreRoutingClassifier, priority=200,ip actions=resubmit:ServiceMark,resubmit:SessionAffinity,resubmit:ServiceLB", + "cookie=0x1030000000000, table=ServiceMark, priority=200,ip,nw_dst=192.168.77.100 actions=set_field:0x80000/0x80000->reg4", + "cookie=0x1030000000000, table=ServiceMark, priority=200,ip,nw_dst=169.254.0.252 
actions=set_field:0x80000/0x80000->reg4", ) } else { flows = append(flows, @@ -82,9 +82,9 @@ func serviceInitFlows(proxyEnabled, isIPv4, proxyAllEnabled, dsrEnabled bool) [] } if proxyAllEnabled { flows = append(flows, - "cookie=0x1030000000000, table=PreRoutingClassifier, priority=200,ipv6 actions=resubmit:NodePortMark,resubmit:SessionAffinity,resubmit:ServiceLB", - "cookie=0x1030000000000, table=NodePortMark, priority=200,ipv6,ipv6_dst=fec0:192:168:77::100 actions=set_field:0x80000/0x80000->reg4", - "cookie=0x1030000000000, table=NodePortMark, priority=200,ipv6,ipv6_dst=fc01::aabb:ccdd:eefe actions=set_field:0x80000/0x80000->reg4", + "cookie=0x1030000000000, table=PreRoutingClassifier, priority=200,ipv6 actions=resubmit:ServiceMark,resubmit:SessionAffinity,resubmit:ServiceLB", + "cookie=0x1030000000000, table=ServiceMark, priority=200,ipv6,ipv6_dst=fec0:192:168:77::100 actions=set_field:0x80000/0x80000->reg4", + "cookie=0x1030000000000, table=ServiceMark, priority=200,ipv6,ipv6_dst=fc01::aabb:ccdd:eefe actions=set_field:0x80000/0x80000->reg4", ) } else { flows = append(flows, diff --git a/pkg/agent/proxy/proxier.go b/pkg/agent/proxy/proxier.go index c2046d8a26e..d8beaec5231 100644 --- a/pkg/agent/proxy/proxier.go +++ b/pkg/agent/proxy/proxier.go @@ -501,7 +501,8 @@ func serviceIdentityChanged(svcInfo, pSvcInfo *types.ServiceInfo) bool { func serviceExternalAddressesChanged(svcInfo, pSvcInfo *types.ServiceInfo) bool { return svcInfo.NodePort() != pSvcInfo.NodePort() || !slices.Equal(svcInfo.LoadBalancerIPStrings(), pSvcInfo.LoadBalancerIPStrings()) || - !slices.Equal(svcInfo.ExternalIPStrings(), pSvcInfo.ExternalIPStrings()) + !slices.Equal(svcInfo.ExternalIPStrings(), pSvcInfo.ExternalIPStrings()) || + !slices.Equal(svcInfo.LoadBalancerSourceRanges(), pSvcInfo.LoadBalancerSourceRanges()) } // smallSliceDifference builds a slice which includes all the strings from s1 @@ -636,6 +637,7 @@ func (p *proxier) uninstallExternalIPService(svcInfoStr string, 
externalIPString func (p *proxier) installLoadBalancerService(svcInfoStr string, localGroupID, clusterGroupID binding.GroupIDType, + loadBalancerSourceRanges []string, loadBalancerIPStrings []string, svcPort uint16, protocol binding.Protocol, @@ -646,17 +648,18 @@ func (p *proxier) installLoadBalancerService(svcInfoStr string, if ingress != "" { ip := net.ParseIP(ingress) if err := p.ofClient.InstallServiceFlows(&agenttypes.ServiceConfig{ - ServiceIP: ip, - ServicePort: svcPort, - Protocol: protocol, - TrafficPolicyLocal: trafficPolicyLocal, - LocalGroupID: localGroupID, - ClusterGroupID: clusterGroupID, - AffinityTimeout: affinityTimeout, - IsExternal: true, - IsNodePort: false, - IsNested: false, // Unsupported for LoadBalancerIP - IsDSR: features.DefaultFeatureGate.Enabled(features.LoadBalancerModeDSR) && loadBalancerMode == agentconfig.LoadBalancerModeDSR, + ServiceIP: ip, + ServicePort: svcPort, + Protocol: protocol, + TrafficPolicyLocal: trafficPolicyLocal, + LocalGroupID: localGroupID, + ClusterGroupID: clusterGroupID, + AffinityTimeout: affinityTimeout, + IsExternal: true, + IsNodePort: false, + IsNested: false, // Unsupported for LoadBalancerIP + IsDSR: features.DefaultFeatureGate.Enabled(features.LoadBalancerModeDSR) && loadBalancerMode == agentconfig.LoadBalancerModeDSR, + LoadBalancerSourceRanges: loadBalancerSourceRanges, }); err != nil { return fmt.Errorf("failed to install LoadBalancerIP load balancing OVS flows: %w", err) } @@ -895,7 +898,7 @@ func (p *proxier) installServiceFlows(svcInfo *types.ServiceInfo, localGroupID, } // Install LoadBalancer flows and configurations. 
if p.proxyLoadBalancerIPs { - if err := p.installLoadBalancerService(svcInfoStr, localGroupID, clusterGroupID, svcInfo.LoadBalancerIPStrings(), svcPort, svcProto, svcInfo.ExternalPolicyLocal(), affinityTimeout, loadBalancerMode); err != nil { + if err := p.installLoadBalancerService(svcInfoStr, localGroupID, clusterGroupID, svcInfo.LoadBalancerSourceRanges(), svcInfo.LoadBalancerIPStrings(), svcPort, svcProto, svcInfo.ExternalPolicyLocal(), affinityTimeout, loadBalancerMode); err != nil { klog.ErrorS(err, "Error when installing LoadBalancer flows and configurations for Service", "ServiceInfo", svcInfoStr) return false } @@ -937,13 +940,19 @@ func (p *proxier) updateServiceExternalAddresses(pSvcInfo, svcInfo *types.Servic } } if p.proxyLoadBalancerIPs { - deletedLoadBalancerIPs := smallSliceDifference(pSvcInfo.LoadBalancerIPStrings(), svcInfo.LoadBalancerIPStrings()) - addedLoadBalancerIPs := smallSliceDifference(svcInfo.LoadBalancerIPStrings(), pSvcInfo.LoadBalancerIPStrings()) + var deletedLoadBalancerIPs, addedLoadBalancerIPs []string + if !slices.Equal(svcInfo.LoadBalancerSourceRanges(), pSvcInfo.LoadBalancerSourceRanges()) { + deletedLoadBalancerIPs = pSvcInfo.LoadBalancerIPStrings() + addedLoadBalancerIPs = svcInfo.LoadBalancerIPStrings() + } else { + deletedLoadBalancerIPs = smallSliceDifference(pSvcInfo.LoadBalancerIPStrings(), svcInfo.LoadBalancerIPStrings()) + addedLoadBalancerIPs = smallSliceDifference(svcInfo.LoadBalancerIPStrings(), pSvcInfo.LoadBalancerIPStrings()) + } if err := p.uninstallLoadBalancerService(pSvcInfoStr, deletedLoadBalancerIPs, pSvcPort, pSvcProto); err != nil { klog.ErrorS(err, "Error when uninstalling LoadBalancer flows and configurations for Service", "ServiceInfo", pSvcInfoStr) return false } - if err := p.installLoadBalancerService(svcInfoStr, localGroupID, clusterGroupID, addedLoadBalancerIPs, svcPort, svcProto, svcInfo.ExternalPolicyLocal(), affinityTimeout, loadBalancerMode); err != nil { + if err := 
p.installLoadBalancerService(svcInfoStr, localGroupID, clusterGroupID, svcInfo.LoadBalancerSourceRanges(), addedLoadBalancerIPs, svcPort, svcProto, svcInfo.ExternalPolicyLocal(), affinityTimeout, loadBalancerMode); err != nil { klog.ErrorS(err, "Error when installing LoadBalancer flows and configurations for Service", "ServiceInfo", svcInfoStr) return false } diff --git a/pkg/agent/proxy/proxier_test.go b/pkg/agent/proxy/proxier_test.go index 48235254447..81677a19e26 100644 --- a/pkg/agent/proxy/proxier_test.go +++ b/pkg/agent/proxy/proxier_test.go @@ -54,23 +54,25 @@ import ( ) var ( - svc1IPv4 = net.ParseIP("10.20.30.41") - svc2IPv4 = net.ParseIP("10.20.30.42") - svc1IPv6 = net.ParseIP("2001::10:20:30:41") - ep1IPv4 = net.ParseIP("10.180.0.1") - ep1IPv6 = net.ParseIP("2001::10:180:0:1") - ep2IPv4 = net.ParseIP("10.180.0.2") - ep2IPv6 = net.ParseIP("2001::10:180:0:2") - loadBalancerIPv4 = net.ParseIP("169.254.169.1") - loadBalancerIPv6 = net.ParseIP("fec0::169:254:169:1") - loadBalancerIPModeProxyIPv4 = net.ParseIP("169.254.169.2") - loadBalancerIPModeProxyIPv6 = net.ParseIP("fec0::169:254:169:2") - svcNodePortIPv4 = net.ParseIP("192.168.77.100") - svcNodePortIPv6 = net.ParseIP("2001::192:168:77:100") - externalIPv4 = net.ParseIP("192.168.77.101") - externalIPv6 = net.ParseIP("2001::192:168:77:101") - nodePortAddressesIPv4 = []net.IP{svcNodePortIPv4} - nodePortAddressesIPv6 = []net.IP{svcNodePortIPv6} + svc1IPv4 = net.ParseIP("10.20.30.41") + svc2IPv4 = net.ParseIP("10.20.30.42") + svc1IPv6 = net.ParseIP("2001::10:20:30:41") + ep1IPv4 = net.ParseIP("10.180.0.1") + ep1IPv6 = net.ParseIP("2001::10:180:0:1") + ep2IPv4 = net.ParseIP("10.180.0.2") + ep2IPv6 = net.ParseIP("2001::10:180:0:2") + loadBalancerIPv4 = net.ParseIP("169.254.169.1") + loadBalancerIPv6 = net.ParseIP("fec0::169:254:169:1") + loadBalancerIPModeProxyIPv4 = net.ParseIP("169.254.169.2") + loadBalancerIPModeProxyIPv6 = net.ParseIP("fec0::169:254:169:2") + loadBalancerSourceRangesIPv4 = 
[]string{"192.168.1.0/24", "192.168.2.0/24"} + loadBalancerSourceRangesIPv6 = []string{"fec0:192:168:1::/64", "fec0:192:168:2::/64"} + svcNodePortIPv4 = net.ParseIP("192.168.77.100") + svcNodePortIPv6 = net.ParseIP("2001::192:168:77:100") + externalIPv4 = net.ParseIP("192.168.77.101") + externalIPv6 = net.ParseIP("2001::192:168:77:101") + nodePortAddressesIPv4 = []net.IP{svcNodePortIPv4} + nodePortAddressesIPv6 = []net.IP{svcNodePortIPv6} svcPort = 80 svcNodePort = 30008 @@ -105,6 +107,13 @@ func loadBalancerIPModeProxyIP(isIPv6 bool) net.IP { return loadBalancerIPModeProxyIPv4 } +func loadBalancerSourceRanges(isIPv6 bool) []string { + if isIPv6 { + return loadBalancerSourceRangesIPv6 + } + return loadBalancerSourceRangesIPv4 +} + func protocolTCP(isIPv6 bool) binding.Protocol { if isIPv6 { return binding.ProtocolTCPv6 @@ -304,6 +313,7 @@ func makeTestNodePortService(svcPortName *k8sproxy.ServicePortName, func makeTestLoadBalancerService(svcPortName *k8sproxy.ServicePortName, clusterIP net.IP, + loadBalancerSourceRanges []string, externalIPs, loadBalancerIPs []net.IP, loadBalancerIPModeProxyIPs []net.IP, @@ -316,6 +326,7 @@ func makeTestLoadBalancerService(svcPortName *k8sproxy.ServicePortName, return makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *corev1.Service) { svc.Spec.ClusterIP = clusterIP.String() svc.Spec.Type = corev1.ServiceTypeLoadBalancer + svc.Spec.LoadBalancerSourceRanges = loadBalancerSourceRanges var ingress []corev1.LoadBalancerIngress for _, ip := range loadBalancerIPs { if ip != nil { @@ -650,6 +661,7 @@ func testLoadBalancerAdd(t *testing.T, ep2IP := ep2IP(isIPv6) loadBalancerIP := loadBalancerIP(isIPv6) loadBalancerIPModeProxyIP := loadBalancerIPModeProxyIP(isIPv6) + loadBalancerSourceRanges := loadBalancerSourceRanges(isIPv6) fp := newFakeProxier(mockRouteClient, mockOFClient, nodePortAddresses, groupAllocator, isIPv6, options...) 
externalTrafficPolicy := corev1.ServiceExternalTrafficPolicyTypeCluster @@ -662,6 +674,7 @@ func testLoadBalancerAdd(t *testing.T, } svc := makeTestLoadBalancerService(&svcPortName, svcIP, + loadBalancerSourceRanges, []net.IP{externalIP}, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, @@ -724,14 +737,15 @@ func testLoadBalancerAdd(t *testing.T, }) if proxyLoadBalancerIPs { mockOFClient.EXPECT().InstallServiceFlows(&antreatypes.ServiceConfig{ - ServiceIP: loadBalancerIP, - ServicePort: uint16(svcPort), - Protocol: protocol, - TrafficPolicyLocal: nodeLocalExternal, - LocalGroupID: 1, - ClusterGroupID: 2, - IsExternal: true, - IsDSR: isDSR, + ServiceIP: loadBalancerIP, + ServicePort: uint16(svcPort), + Protocol: protocol, + TrafficPolicyLocal: nodeLocalExternal, + LocalGroupID: 1, + ClusterGroupID: 2, + IsExternal: true, + IsDSR: isDSR, + LoadBalancerSourceRanges: loadBalancerSourceRanges, }) } if externalIP != nil { @@ -783,14 +797,15 @@ func testLoadBalancerAdd(t *testing.T, }) if proxyLoadBalancerIPs { mockOFClient.EXPECT().InstallServiceFlows(&antreatypes.ServiceConfig{ - ServiceIP: loadBalancerIP, - ServicePort: uint16(svcPort), - Protocol: protocol, - TrafficPolicyLocal: nodeLocalExternal, - LocalGroupID: localGroupID, - ClusterGroupID: clusterGroupID, - IsExternal: true, - IsDSR: isDSR, + ServiceIP: loadBalancerIP, + ServicePort: uint16(svcPort), + Protocol: protocol, + TrafficPolicyLocal: nodeLocalExternal, + LocalGroupID: localGroupID, + ClusterGroupID: clusterGroupID, + IsExternal: true, + IsDSR: isDSR, + LoadBalancerSourceRanges: loadBalancerSourceRanges, }) } if externalIP != nil { @@ -1741,6 +1756,7 @@ func testLoadBalancerRemove(t *testing.T, protocol binding.Protocol, isIPv6 bool epIP := ep1IP(isIPv6) loadBalancerIP := loadBalancerIP(isIPv6) loadBalancerIPModeProxyIP := loadBalancerIPModeProxyIP(isIPv6) + loadBalancerSourceRanges := loadBalancerSourceRanges(isIPv6) fp := newFakeProxier(mockRouteClient, mockOFClient, nodePortAddresses, 
groupAllocator, isIPv6, options...) externalTrafficPolicy := corev1.ServiceExternalTrafficPolicyTypeLocal @@ -1748,6 +1764,7 @@ func testLoadBalancerRemove(t *testing.T, protocol binding.Protocol, isIPv6 bool svc := makeTestLoadBalancerService(&svcPortName, svcIP, + loadBalancerSourceRanges, []net.IP{externalIP}, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, @@ -1793,13 +1810,14 @@ func testLoadBalancerRemove(t *testing.T, protocol binding.Protocol, isIPv6 bool IsNodePort: true, }) mockOFClient.EXPECT().InstallServiceFlows(&antreatypes.ServiceConfig{ - ServiceIP: loadBalancerIP, - ServicePort: uint16(svcPort), - Protocol: protocol, - TrafficPolicyLocal: true, - LocalGroupID: 1, - ClusterGroupID: 2, - IsExternal: true, + ServiceIP: loadBalancerIP, + ServicePort: uint16(svcPort), + Protocol: protocol, + TrafficPolicyLocal: true, + LocalGroupID: 1, + ClusterGroupID: 2, + IsExternal: true, + LoadBalancerSourceRanges: loadBalancerSourceRanges, }) mockRouteClient.EXPECT().AddNodePortConfigs(nodePortAddresses, uint16(svcNodePort), protocol) mockRouteClient.EXPECT().AddExternalIPConfigs(svcInfoStr, loadBalancerIP) @@ -2167,6 +2185,7 @@ func testLoadBalancerNoEndpoint(t *testing.T, protocol binding.Protocol, isIPv6 svc := makeTestLoadBalancerService(&svcPortName, svcIP, nil, + nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), @@ -2178,6 +2197,7 @@ func testLoadBalancerNoEndpoint(t *testing.T, protocol binding.Protocol, isIPv6 updatedSvc := makeTestLoadBalancerService(&svcPortName, svcIP, nil, + nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort+1), @@ -2367,6 +2387,7 @@ func testLoadBalancerRemoveEndpoints(t *testing.T, protocol binding.Protocol, is svc := makeTestLoadBalancerService(&svcPortName, svcIP, + nil, []net.IP{externalIP}, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, @@ -2594,8 +2615,8 @@ func testServicePortUpdate(t *testing.T, protocol binding.Protocol, isIPv6 
bool, svc = makeTestNodePortService(&svcPortName, svcIP, nil, int32(svcPort), int32(svcNodePort), apiProtocol, nil, corev1.ServiceInternalTrafficPolicyCluster, corev1.ServiceExternalTrafficPolicyTypeCluster) updatedSvc = makeTestNodePortService(&svcPortName, svcIP, nil, int32(svcPort+1), int32(svcNodePort), apiProtocol, nil, corev1.ServiceInternalTrafficPolicyCluster, corev1.ServiceExternalTrafficPolicyTypeCluster) case corev1.ServiceTypeLoadBalancer: - svc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, []net.IP{loadBalancerIP}, nil, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) - updatedSvc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, []net.IP{loadBalancerIP}, nil, int32(svcPort+1), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + svc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, nil, []net.IP{loadBalancerIP}, nil, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + updatedSvc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, nil, []net.IP{loadBalancerIP}, nil, int32(svcPort+1), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) } makeServiceMap(fp, svc) svcInfoStr := fmt.Sprintf("%s:%d/%s", svcIP, svcPort, apiProtocol) @@ -2751,8 +2772,8 @@ func testServiceNodePortUpdate(t *testing.T, protocol binding.Protocol, isIPv6 b svc = makeTestNodePortService(&svcPortName, svcIP, nil, int32(svcPort), int32(svcNodePort), apiProtocol, nil, corev1.ServiceInternalTrafficPolicyCluster, corev1.ServiceExternalTrafficPolicyTypeCluster) updatedSvc = makeTestNodePortService(&svcPortName, svcIP, nil, int32(svcPort), int32(svcNodePort+1), apiProtocol, nil, corev1.ServiceInternalTrafficPolicyCluster, corev1.ServiceExternalTrafficPolicyTypeCluster) case corev1.ServiceTypeLoadBalancer: - svc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, 
[]net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) - updatedSvc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort+1), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + svc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + updatedSvc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort+1), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) } makeServiceMap(fp, svc) svcInfoStr := fmt.Sprintf("%s:%d/%s", svcIP, svcPort, apiProtocol) @@ -2878,7 +2899,7 @@ func testServiceExternalTrafficPolicyUpdate(t *testing.T, protocol binding.Proto case corev1.ServiceTypeNodePort: svc = makeTestNodePortService(&svcPortName, svcIP, []net.IP{externalIP}, int32(svcPort), int32(svcNodePort), apiProtocol, nil, corev1.ServiceInternalTrafficPolicyCluster, corev1.ServiceExternalTrafficPolicyTypeCluster) case corev1.ServiceTypeLoadBalancer: - svc = makeTestLoadBalancerService(&svcPortName, svcIP, []net.IP{externalIP}, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + svc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, []net.IP{externalIP}, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) } updatedSvc = svc.DeepCopy() updatedSvc.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeLocal @@ 
-3145,6 +3166,140 @@ func TestServiceInternalTrafficPolicyUpdate(t *testing.T) { }) } +func testServiceLoadBalancerSourceRangesUpdate(t *testing.T, + loadBalancerIPs []net.IP, + updatedLoadBalancerIPs []net.IP, + loadBalancerSourceRanges []string, + updatedLoadBalancerSourceRanges []string, + protocol binding.Protocol, + isIPv6 bool) { + ctrl := gomock.NewController(t) + vIP := virtualNodePortDNATIP(isIPv6) + apiProtocol := getAPIProtocol(protocol) + nodePortAddresses := nodePortAddresses(isIPv6) + svcIP := svc1IP(isIPv6) + epIP := ep1IP(isIPv6) + svcInfoStr := fmt.Sprintf("%s:%d/%s", svcIP, svcPort, apiProtocol) + + mockOFClient, mockRouteClient := getMockClients(ctrl) + groupAllocator := openflow.NewGroupAllocator() + fp := newFakeProxier(mockRouteClient, mockOFClient, nodePortAddresses, groupAllocator, isIPv6, withProxyAll) + + svc := makeTestLoadBalancerService(&svcPortName, svcIP, loadBalancerSourceRanges, nil, loadBalancerIPs, nil, int32(svcPort), int32(svcNodePort), corev1.ProtocolTCP, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + updatedSvc := makeTestLoadBalancerService(&svcPortName, svcIP, updatedLoadBalancerSourceRanges, nil, updatedLoadBalancerIPs, nil, int32(svcPort), int32(svcNodePort), corev1.ProtocolTCP, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + makeServiceMap(fp, svc) + + ep, epPort := makeTestEndpointSliceEndpointAndPort(&svcPortName, epIP, int32(svcPort), corev1.ProtocolTCP, false) + eps := makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, []discovery.Endpoint{*ep}, []discovery.EndpointPort{*epPort}, isIPv6) + makeEndpointSliceMap(fp, eps) + + expectedEps := []k8sproxy.Endpoint{k8sproxy.NewBaseEndpointInfo(epIP.String(), "", "", svcPort, false, true, true, false, nil)} + mockOFClient.EXPECT().InstallEndpointFlows(protocol, gomock.InAnyOrder(expectedEps)).Times(1) + mockOFClient.EXPECT().InstallServiceGroup(binding.GroupIDType(1), false, gomock.InAnyOrder(expectedEps)).Times(1) + 
mockOFClient.EXPECT().InstallServiceFlows(&antreatypes.ServiceConfig{ + ServiceIP: svcIP, + ServicePort: uint16(svcPort), + Protocol: protocol, + ClusterGroupID: 1, + }).Times(1) + mockOFClient.EXPECT().InstallServiceFlows(&antreatypes.ServiceConfig{ + ServiceIP: vIP, + ServicePort: uint16(svcNodePort), + Protocol: protocol, + ClusterGroupID: 1, + IsExternal: true, + IsNodePort: true, + }).Times(1) + for _, ip := range loadBalancerIPs { + mockOFClient.EXPECT().InstallServiceFlows(&antreatypes.ServiceConfig{ + ServiceIP: ip, + ServicePort: uint16(svcPort), + Protocol: protocol, + ClusterGroupID: 1, + IsExternal: true, + LoadBalancerSourceRanges: loadBalancerSourceRanges, + }).Times(1) + mockRouteClient.EXPECT().AddExternalIPConfigs(svcInfoStr, ip).Times(1) + } + mockRouteClient.EXPECT().AddNodePortConfigs(nodePortAddresses, uint16(svcNodePort), protocol).Times(1) + + for _, ip := range loadBalancerIPs { + mockOFClient.EXPECT().UninstallServiceFlows(ip, uint16(svcPort), protocol).Times(1) + mockRouteClient.EXPECT().DeleteExternalIPConfigs(svcInfoStr, ip).Times(1) + } + + for _, ip := range updatedLoadBalancerIPs { + mockOFClient.EXPECT().InstallServiceFlows(&antreatypes.ServiceConfig{ + ServiceIP: ip, + ServicePort: uint16(svcPort), + Protocol: protocol, + ClusterGroupID: 1, + IsExternal: true, + LoadBalancerSourceRanges: updatedLoadBalancerSourceRanges, + }).Times(1) + mockRouteClient.EXPECT().AddExternalIPConfigs(svcInfoStr, ip).Times(1) + } + + fp.syncProxyRules() + assert.Contains(t, fp.serviceInstalledMap, svcPortName) + assert.Contains(t, fp.endpointsInstalledMap, svcPortName) + fp.serviceChanges.OnServiceUpdate(svc, updatedSvc) + fp.syncProxyRules() + assert.Contains(t, fp.serviceInstalledMap, svcPortName) + assert.Contains(t, fp.endpointsInstalledMap, svcPortName) +} + +func TestServiceLoadBalancerSourceRangesUpdate(t *testing.T) { + t.Run("IPv4", func(t *testing.T) { + loadBalancerIPs := []net.IP{net.ParseIP("169.254.1.1"), net.ParseIP("169.254.1.2")} + 
updatedLoadBalancerIPs := []net.IP{net.ParseIP("169.254.1.2"), net.ParseIP("169.254.1.3")} + loadBalancerSourceRanges := []string{"192.168.1.0/24", "192.168.2.0/24"} + updatedLoadBalancerSourceRanges := []string{"192.168.3.0/24", "192.168.4.0/24"} + t.Run("LoadBalancer ingress IPs update", func(t *testing.T) { + testServiceLoadBalancerSourceRangesUpdate(t, + loadBalancerIPs, + updatedLoadBalancerIPs, + loadBalancerSourceRanges, + updatedLoadBalancerSourceRanges, + binding.ProtocolTCP, + false) + }) + t.Run("LoadBalancer ingress IPs remain", func(t *testing.T) { + testServiceLoadBalancerSourceRangesUpdate(t, + loadBalancerIPs, + loadBalancerIPs, + loadBalancerSourceRanges, + updatedLoadBalancerSourceRanges, + binding.ProtocolTCP, + false) + }) + }) + t.Run("IPv6", func(t *testing.T) { + loadBalancerIPs := []net.IP{net.ParseIP("fec0::169:254:1:1"), net.ParseIP("fec0::169:254:1:2")} + updatedLoadBalancerIPs := []net.IP{net.ParseIP("fec0::169:254:1:2"), net.ParseIP("fec0::169:254:1:3")} + loadBalancerSourceRanges := []string{"fec0:192:168:1::/64", "fec0:192:168:2::/64"} + updatedLoadBalancerSourceRanges := []string{"fec0:192:168:3::/64", "fec0:192:168:4::/64"} + t.Run("LoadBalancer ingress IPs update", func(t *testing.T) { + testServiceLoadBalancerSourceRangesUpdate(t, + loadBalancerIPs, + updatedLoadBalancerIPs, + loadBalancerSourceRanges, + updatedLoadBalancerSourceRanges, + binding.ProtocolTCPv6, + true) + }) + t.Run("LoadBalancer ingress IPs remain", func(t *testing.T) { + testServiceLoadBalancerSourceRangesUpdate(t, + loadBalancerIPs, + loadBalancerIPs, + loadBalancerSourceRanges, + updatedLoadBalancerSourceRanges, + binding.ProtocolTCPv6, + true) + }) + }) +} + func testServiceExternalIPsUpdate(t *testing.T, protocol binding.Protocol, isIPv6 bool) { ctrl := gomock.NewController(t) mockOFClient, mockRouteClient := getMockClients(ctrl) @@ -3183,8 +3338,8 @@ func testServiceExternalIPsUpdate(t *testing.T, protocol binding.Protocol, isIPv
updatedExternalIPStrings = append(updatedExternalIPStrings, ip.String()) } - svc := makeTestLoadBalancerService(&svcPortName, svcIP, externalIPs, loadBalancerIPs, nil, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) - updatedSvc := makeTestLoadBalancerService(&svcPortName, svcIP, updatedExternalIPs, updatedLoadBalancerIPs, nil, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + svc := makeTestLoadBalancerService(&svcPortName, svcIP, nil, externalIPs, loadBalancerIPs, nil, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + updatedSvc := makeTestLoadBalancerService(&svcPortName, svcIP, nil, updatedExternalIPs, updatedLoadBalancerIPs, nil, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) makeServiceMap(fp, svc) svcInfoStr := fmt.Sprintf("%s:%d/%s", svcIP, svcPort, apiProtocol) @@ -3329,8 +3484,8 @@ func testServiceStickyMaxAgeSecondsUpdate(t *testing.T, protocol binding.Protoco svc = makeTestNodePortService(&svcPortName, svcIP, nil, int32(svcPort), int32(svcNodePort), apiProtocol, &affinitySeconds, corev1.ServiceInternalTrafficPolicyCluster, corev1.ServiceExternalTrafficPolicyTypeCluster) updatedSvc = makeTestNodePortService(&svcPortName, svcIP, nil, int32(svcPort), int32(svcNodePort), apiProtocol, &updatedAffinitySeconds, corev1.ServiceInternalTrafficPolicyCluster, corev1.ServiceExternalTrafficPolicyTypeCluster) case corev1.ServiceTypeLoadBalancer: - svc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, &affinitySeconds, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) - updatedSvc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), 
int32(svcNodePort), apiProtocol, &updatedAffinitySeconds, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + svc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, &affinitySeconds, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + updatedSvc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, &updatedAffinitySeconds, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) } makeServiceMap(fp, svc) svcInfoStr := fmt.Sprintf("%s:%d/%s", svcIP, svcPort, apiProtocol) @@ -3483,8 +3638,8 @@ func testServiceSessionAffinityTypeUpdate(t *testing.T, protocol binding.Protoco svc = makeTestNodePortService(&svcPortName, svcIP, nil, int32(svcPort), int32(svcNodePort), apiProtocol, nil, corev1.ServiceInternalTrafficPolicyCluster, corev1.ServiceExternalTrafficPolicyTypeCluster) updatedSvc = makeTestNodePortService(&svcPortName, svcIP, nil, int32(svcPort), int32(svcNodePort), apiProtocol, &affinitySeconds, corev1.ServiceInternalTrafficPolicyCluster, corev1.ServiceExternalTrafficPolicyTypeCluster) case corev1.ServiceTypeLoadBalancer: - svc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) - updatedSvc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, &affinitySeconds, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) + svc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, nil, nil, 
corev1.ServiceExternalTrafficPolicyTypeCluster) + updatedSvc = makeTestLoadBalancerService(&svcPortName, svcIP, nil, nil, []net.IP{loadBalancerIP}, []net.IP{loadBalancerIPModeProxyIP}, int32(svcPort), int32(svcNodePort), apiProtocol, &affinitySeconds, nil, corev1.ServiceExternalTrafficPolicyTypeCluster) } makeServiceMap(fp, svc) svcInfoStr := fmt.Sprintf("%s:%d/%s", svcIP, svcPort, apiProtocol) diff --git a/pkg/agent/types/service.go b/pkg/agent/types/service.go index 3f76631475c..5c1433d80f3 100644 --- a/pkg/agent/types/service.go +++ b/pkg/agent/types/service.go @@ -36,7 +36,8 @@ type ServiceConfig struct { // IsNested indicates the whether Service's Endpoints are ClusterIPs of other Services. It's used in multi-cluster. IsNested bool // IsDSR indicates that whether the Service works in Direct Server Return mode. - IsDSR bool + IsDSR bool + LoadBalancerSourceRanges []string } func (c *ServiceConfig) TrafficPolicyGroupID() openflow.GroupIDType {