test OOM by control frames attack #615

Open · wants to merge 11 commits into master
7 changes: 7 additions & 0 deletions helpers/deproxy.py
@@ -991,6 +991,7 @@ def __init__(
self.bind_addr = bind_addr or tf_cfg.cfg.get("Client", "ip")
self.error_codes = []
self.socket_family = socket_family
self._socket_options = list()

def __stop_client(self):
dbg(self, 4, "Stop", prefix="\t")
@@ -1009,13 +1010,19 @@ def run_start(self):
self.create_socket(
socket.AF_INET if self.socket_family == "ipv4" else socket.AF_INET6, socket.SOCK_STREAM
)
for option in self._socket_options:
self.socket.setsockopt(option[0], option[1], option[2])
if self.bind_addr:
self.bind((self.bind_addr, 0))
self.connect((self.conn_addr, self.port))

def clear(self):
self.request_buffer = ""

def add_socket_options(self, level: int, optname: int, value_: int):
"""This method should be used before `run_start`."""
self._socket_options.append((level, optname, value_))

def set_request(self, message_chain):
if message_chain:
self.request = message_chain.request
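For reference, a minimal usage sketch of the new `add_socket_options` helper inside a TempestaTest method (the client id "deproxy" and the SO_RCVBUF value are taken from the test added below; the surrounding calls are assumed boilerplate). Options queued this way are applied with `setsockopt()` when `run_start()` creates the socket:

import socket

client = self.get_client("deproxy")
# Queue the option before the socket exists; run_start() will apply it via setsockopt().
client.add_socket_options(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
self.start_all_services()  # creates the socket, applies the queued options, connects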
2 changes: 2 additions & 0 deletions helpers/dmesg.py
@@ -149,4 +149,6 @@ def func_wrapper(*args, **kwargs):
cmd = "echo {} > /proc/sys/net/core/message_cost".format(msg_cost.decode())
node.run_cmd(cmd)

func_wrapper.__name__ = func.__name__

return func_wrapper
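The `func_wrapper.__name__ = func.__name__` line makes the decorated test keep its original name, presumably so that tests can still be selected or disabled by name after the decorator is applied. A minimal sketch of the same idea using the standard library — an alternative, not what the patch does — assuming, as the context suggests, that `func_wrapper` belongs to the `unlimited_rate_on_tempesta_node` decorator in dmesg.py:

import functools

def unlimited_rate_on_tempesta_node(func):
    # functools.wraps copies __name__, __doc__, __wrapped__, etc. onto the wrapper,
    # so the explicit `func_wrapper.__name__ = func.__name__` assignment becomes unnecessary.
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        ...  # adjust /proc/sys/net/core/message_cost around the call, as dmesg.py does
        return func(*args, **kwargs)

    return func_wrapper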
3 changes: 2 additions & 1 deletion http2_general/__init__.py
@@ -9,7 +9,8 @@
"test_h2_headers",
"test_h2_sticky_cookie",
"test_h2_max_frame_size",
"test_h2_block_action"
"test_h2_block_action",
"test_max_queued_control_frames",
]

# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
173 changes: 173 additions & 0 deletions http2_general/test_max_queued_control_frames.py
@@ -0,0 +1,173 @@
"""The functional tests for `max_queued_control_frames` directive."""

__author__ = "Tempesta Technologies, Inc."
__copyright__ = "Copyright (C) 2024 Tempesta Technologies, Inc."
__license__ = "GPL2"

import socket
import time

from h2.settings import SettingCodes
from hyperframe.frame import HeadersFrame, PingFrame, PriorityFrame, SettingsFrame

from framework import tester
from framework.parameterize import param, parameterize
from helpers import dmesg


class TestH2ControlFramesFlood(tester.TempestaTest):
clients = [
{
"id": "deproxy",
"type": "deproxy_h2", # "deproxy" for HTTP/1
"addr": "${tempesta_ip}",
"port": "443",
"ssl": True,
"ssl_hostname": "tempesta-tech.com",
},
]

backends = [
{
"id": "deproxy",
"type": "deproxy",
"port": "8000",
"response": "static",
"response_content": (
"HTTP/1.1 200 OK\r\n" + "Content-Length: 200000\r\n\r\n" + ("a" * 200000)
),
}
]

tempesta = {
"config": """
keepalive_timeout 1000;
listen 443 proto=h2;
max_concurrent_streams 20000;

tls_match_any_server_name;

srv_group default {
server ${server_ip}:8000;
}
%s
vhost tempesta-tech.com {
tls_certificate ${tempesta_workdir}/tempesta.crt;
tls_certificate_key ${tempesta_workdir}/tempesta.key;
proxy_pass default;
}
"""
}

def __update_tempesta_config(self, limit: int) -> None:
limit = "" if limit == 10000 else f"max_queued_control_frames {limit};"
self.get_tempesta().config.defconfig = self.get_tempesta().config.defconfig % limit

def __init_connection_and_disable_readable(self, client) -> None:
client.update_initial_settings()
client.send_bytes(client.h2_connection.data_to_send())
self.assertTrue(client.wait_for_ack_settings())

client.make_request(client.create_request(method="GET", headers=[]))
[PR author comment] There is no need to request a large response now, because the sockbuf is already set to a small enough size. Also, we don't need the backend, because the control-frame slow read does not involve the backend.

client.readable = lambda: False # disable socket for reading

@parameterize.expand(
[
param(
name="ping",
frame=PingFrame(0, b"\x00\x01\x02\x03\x04\x05\x06\x07").serialize(),
),
param(
name="settings",
frame=SettingsFrame(
settings={k: 100 for k in (SettingCodes.MAX_CONCURRENT_STREAMS,)}
).serialize(),
),
]
)
@dmesg.unlimited_rate_on_tempesta_node
def test(self, name, frame):
self.__update_tempesta_config(100)

client = self.get_client("deproxy")
# Set a small receive buffer for the client; it is needed for test stability.
# Different VMs have different default buffer sizes.
client.add_socket_options(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)

self.start_all_services()
# Request a large response from the backend and disable the socket for reading.
# Tempesta cannot send any data to the client until the client enables reading again.
self.__init_connection_and_disable_readable(client)

# Wait until the client's receive buffer is full.
time.sleep(2)

# Send more frames than the limit to keep the test stable.
for _ in range(110): # max_queued_control_frames is 100.
client.send_bytes(frame, expect_response=False)

self.assertTrue(
self.oops.find(
"Warning: Too many control frames in send queue, closing connection",
dmesg.amount_positive,
),
"An unexpected number of dmesg warnings",
)
client.readable = lambda: True # enable socket for reading
self.assertTrue(
client.wait_for_connection_close(),
"TempestaFW did not block client after exceeding `max_queued_control_frames` limit.",
)

@parameterize.expand(
[
param(name="default_limit", limit=10000),
param(name="100_limit", limit=100),
]
)
@dmesg.unlimited_rate_on_tempesta_node
def test_reset_stream(self, name, limit: int):
self.__update_tempesta_config(limit)

client = self.get_client("deproxy")
# Set a small receive buffer for the client; it is needed for test stability.
# Different VMs have different default buffer sizes.
client.add_socket_options(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)

self.start_all_services()
# Request a large response from the backend and disable the socket for reading.
# Tempesta cannot send any data to the client until the client enables reading again.
self.__init_connection_and_disable_readable(client)

# Wait until the client's receive buffer is full.
time.sleep(2)

headers = [
(":method", "GET"),
(":path", "/"),
(":authority", "tempesta-tech.com"),
(":scheme", "https"),
]

# Send more frames than the limit to keep the test stable.
for i in range(3, int(limit * 2.2), 2):
hf = HeadersFrame(
stream_id=i,
data=client.h2_connection.encoder.encode(headers),
flags=["END_HEADERS"],
).serialize()
prio = PriorityFrame(stream_id=i, depends_on=i).serialize()
client.send_bytes(hf + prio, expect_response=False)

self.assertTrue(
self.oops.find(
"Warning: Too many control frames in send queue, closing connection",
dmesg.amount_positive,
),
"An unexpected number of dmesg warnings",
)
client.readable = lambda: True # enable socket for reading
self.assertTrue(
client.wait_for_connection_close(),
"TempestaFW did not block client after exceeding `max_queued_control_frames` limit.",
)
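A rough back-of-the-envelope for why the test parameters are sufficient to trip the limit (assumptions: RFC 7540 frame sizes, and that Tempesta answers each PING with a PING ACK and each SETTINGS frame with a SETTINGS ACK). With reading disabled and SO_RCVBUF capped near 1 KB, the pending 200 KB response already saturates the client's receive space, so virtually every ACK stays in Tempesta's send queue, and 110 queued control frames exceed the configured limit of 100:

# Hypothetical sanity check of the test arithmetic (not part of the suite).
FRAME_HEADER = 9              # bytes, common to every HTTP/2 frame
PING_ACK = FRAME_HEADER + 8   # 17 bytes: header plus the 8-byte opaque payload echoed back
SETTINGS_ACK = FRAME_HEADER   # 9 bytes: a SETTINGS ACK carries an empty payload

frames_sent = 110             # the test loop above
limit = 100                   # max_queued_control_frames in the test config
queued_ping_bytes = frames_sent * PING_ACK  # ~1870 bytes of ACKs alone, none of which can be flushed
assert frames_sent > limit    # the send queue must overflow the limit to log the warning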
12 changes: 12 additions & 0 deletions tests_disabled_tcpseg.json
@@ -172,6 +172,18 @@
{
"name": "http2_general.test_h2_streams.TestH2Stream.test_max_concurrent_stream",
"reason": "Disabled by test issue #525"
},
{
"name": "t_stress.test_flood.TestH2ControlFramesFlood.test_ping",
"reason": "Disabled by test issue #612"
},
{
"name": "t_stress.test_flood.TestH2ControlFramesFlood.test_settings",
"reason": "Disabled by test issue #612"
},
{
"name": "t_stress.test_flood.TestH2ControlFramesFlood.test_reset_stream",
"reason": "Disabled by test issue #612"
}
]
}