diff --git a/.readthedocs.yml b/.readthedocs.yml index 4d21d9b404..e7c2b25d55 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -12,8 +12,12 @@ sphinx: # Optionally build your docs in additional formats such as PDF and ePub formats: all +build: + os: ubuntu-22.04 + tools: + python: "3.11" + # Optionally set the version of Python and requirements required to build your docs python: - version: 3.7 install: - requirements: doc/requirements.txt diff --git a/API_CHANGES.md b/API_CHANGES.md index 98a08f09d9..61d8781fba 100644 --- a/API_CHANGES.md +++ b/API_CHANGES.md @@ -4,6 +4,10 @@ API Changes When an addition to the existing API is made, the minor version is bumped. When an API feature or function is removed or changed, the major version is bumped. +2.5.0 +===== +Add support for specifying a type override for `object_from_symbol` + 2.4.0 ===== Add a `get_size()` method to Windows VAD structures and fix several off-by-one issues when calculating VAD sizes. diff --git a/doc/requirements.txt b/doc/requirements.txt index 93d6ea70a7..b715e59f58 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,4 +1,8 @@ # These packages are required for building the documentation. -sphinx>=4.0.0 +sphinx>=4.0.0,<7 sphinx_autodoc_typehints>=1.4.0 sphinx-rtd-theme>=0.4.3 + +yara-python +pycryptodome +pefile diff --git a/doc/source/basics.rst b/doc/source/basics.rst index d493c61b37..1b8e647808 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1,7 +1,7 @@ Volatility 3 Basics =================== -Volatility splits memory analysis down to several components: +Volatility splits memory analysis down to several components. The main ones are: * Memory layers * Templates and Objects @@ -13,22 +13,65 @@ which acts as a container for all the various layers and tables necessary to con Memory layers ------------- -A memory layer is a body of data that can be accessed by requesting data at a specific address. Memory is seen as -sequential when accessed through sequential addresses, however, there is no obligation for the data to be stored -sequentially, and modern processors tend to store the memory in a paged format. Moreover, there is no need for the data -to be stored in an easily accessible format, it could be encoded or encrypted or more, it could be the combination of -two other sources. These are typically handled by programs that process file formats, or the memory manager of the -processor, but these are all translations (either in the geometric or linguistic sense) of the original data. - -In Volatility 3 this is represented by a directed graph, whose end nodes are -:py:class:`DataLayers ` and whose internal nodes are -specifically called a :py:class:`TranslationLayer `. -In this way, a raw memory image in the LiME file format and a page file can be -combined to form a single Intel virtual memory layer. When requesting addresses from the Intel layer, it will use the -Intel memory mapping algorithm, along with the address of the directory table base or page table map, to translate that +A memory layer is a body of data that can be accessed by requesting data at a specific address. At its lowest level +this data is stored on a physical medium (RAM) and very early computers addressed locations in memory directly. However, +as the size of memory increased and it became more difficult to manage, most architectures moved to a "paged" model +of memory, where the available memory is divided into fixed-size pages. 
To help further, programs can ask for any address +and the processor will look up that (virtual) address in a map to find the (physical) address where the data actually lives +in the memory of the system. + +Volatility can work with these layers as long as it knows the map (so, for example, that virtual address `1` maps to physical +address `9`). The automagic that runs at the start of every Volatility session often locates the kernel's memory map, and creates +a kernel virtual layer, which allows kernel addresses to be looked up and the correct data returned. There can, however, be +several maps, and in general there is a different map for each process (although a portion of the operating system's memory is +usually mapped to the same location across all processes). Two maps may contain the same virtual address but point to different parts of +physical memory. It also means that two processes can share memory, by having a virtual address mapped to the +same physical address as another process. See the worked example below for more information. + +To translate an address on a layer, call :py:meth:`layer.mapping(offset, length, ignore_errors) ` and it will return a list of chunks without overlap, in order, +for the requested range. If a portion cannot be mapped, an exception will be thrown unless `ignore_errors` is true. Each +chunk will contain the original offset of the chunk, the translated offset, the original size and the translated size of +the chunk, as well as the lower layer the chunk lives within. + +Worked example +^^^^^^^^^^^^^^ + +The operating system and two programs may all appear to have access to all of physical memory, but actually the maps they each have +mean they each see something different: + +.. code-block:: + :caption: Memory mapping example + + Operating system map Physical Memory + 1 -> 9 1 - Free + 2 -> 3 2 - OS.4, Process 1.4, Process 2.4 + 3 -> 7 3 - OS.2 + 4 -> 2 4 - Free + 5 - Free + Process 1 map 6 - Process 1.2, Process 2.3 + 1 -> 12 7 - OS.3 + 2 -> 6 8 - Process 1.3 + 3 -> 8 9 - OS.1 + 4 -> 2 10 - Process 2.1 + 11 - Free + Process 2 map 12 - Process 1.1 + 1 -> 10 13 - Free + 2 -> 15 14 - Free + 3 -> 6 15 - Process 2.2 + 4 -> 2 16 - Free + +In this example, part of the operating system is visible across all processes (although not all processes can write to the memory; there +is a permissions model for Intel addressing which is not discussed further here). + +In Volatility 3 mappings are represented by a directed graph of layers, whose end nodes are +:py:class:`DataLayers ` and whose internal nodes are :py:class:`TranslationLayers `. +In this way, a raw memory image in the LiME file format and a page file can be combined to form a single Intel virtual +memory layer. When requesting addresses from the Intel layer, it will use the Intel memory mapping algorithm, along +with the address of the directory table base or page table map, to translate that address into a physical address, which will then either be directed towards the swap layer or the LiME layer. Should it -be directed towards the LiME layer, the LiME file format algorithm will be translated to determine where within the file -the data is stored and that will be returned. +be directed towards the LiME layer, the LiME file format algorithm will translate the new address to determine where +within the file the data is stored. When the :py:meth:`layer.read() ` +method is called, the translation is done automatically and the correct data is gathered and combined.
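As a minimal sketch (the layer name here is hypothetical and assumes a context that has already been populated, for example by the automagic), translating and reading from a layer looks roughly like this:

.. code-block:: python

    # Assumes ``context`` already contains a layer called "layer_name",
    # such as a kernel virtual layer constructed by the automagic
    layer = context.layers["layer_name"]

    # Each chunk is a tuple describing how part of the requested range maps into the layer below
    for chunk in layer.mapping(0x1000, 0x200, ignore_errors=True):
        print(chunk)

    # read() performs the translation automatically and returns the combined bytes
    data = layer.read(0x1000, 0x200, pad=True)

.. 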
note:: Volatility 2 had a similar concept, called address spaces, but these could only stack linearly one on top of another. diff --git a/doc/source/conf.py b/doc/source/conf.py index 895219b250..8b467ec1d1 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -21,57 +21,72 @@ def setup(app): - volatility_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'volatility3')) + volatility_directory = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..", "..", "volatility3") + ) source_dir = os.path.abspath(os.path.dirname(__file__)) - sphinx.ext.apidoc.main(argv = ['-e', '-M', '-f', '-T', '-o', source_dir, volatility_directory]) + sphinx.ext.apidoc.main( + argv=["-e", "-M", "-f", "-T", "-o", source_dir, volatility_directory] + ) # Go through the volatility3.framework.plugins files and change them to volatility3.plugins for dir, _, files in os.walk(os.path.dirname(__file__)): for filename in files: - if filename.startswith('volatility3.framework.plugins') and filename != 'volatility3.framework.plugins.rst': + if ( + filename.startswith("volatility3.framework.plugins") + and filename != "volatility3.framework.plugins.rst" + ): # Change all volatility3.framework.plugins to volatility3.plugins in the file # Rename the file - new_filename = filename.replace('volatility3.framework.plugins', 'volatility3.plugins') + new_filename = filename.replace( + "volatility3.framework.plugins", "volatility3.plugins" + ) replace_string = b"Submodules\n----------\n\n.. toctree::\n\n" submodules = replace_string # If file already exists, read out the subpackages entries from it add them to the new list if os.path.exists(os.path.join(dir, new_filename)): - with open(os.path.join(dir, new_filename), 'rb') as newfile: + with open(os.path.join(dir, new_filename), "rb") as newfile: data = newfile.read() index = data.find(replace_string) if index > -1: submodules = data[index:] - with open(os.path.join(dir, new_filename), 'wb') as newfile: + with open(os.path.join(dir, new_filename), "wb") as newfile: with open(os.path.join(dir, filename), "rb") as oldfile: line = oldfile.read() - correct_plugins = line.replace(b'volatility3.framework.plugins', b'volatility3.plugins') - correct_submodules = correct_plugins.replace(replace_string, submodules) + correct_plugins = line.replace( + b"volatility3.framework.plugins", b"volatility3.plugins" + ) + correct_submodules = correct_plugins.replace( + replace_string, submodules + ) newfile.write(correct_submodules) os.remove(os.path.join(dir, filename)) - elif filename == 'volatility3.framework.rst': + elif filename == "volatility3.framework.rst": with open(os.path.join(dir, filename), "rb") as contents: lines = contents.readlines() plugins_seen = False with open(os.path.join(dir, filename), "wb") as contents: for line in lines: - if b'volatility3.framework.plugins' in line: + if b"volatility3.framework.plugins" in line: plugins_seen = True - if plugins_seen and line == b'': - contents.write(b' volatility3.plugins') + if plugins_seen and line == b"": + contents.write(b" volatility3.plugins") contents.write(line) - elif filename == 'volatility3.plugins.rst': + elif filename == "volatility3.plugins.rst": with open(os.path.join(dir, filename), "rb") as contents: lines = contents.readlines() - with open(os.path.join(dir, 'volatility3.framework.plugins.rst'), "rb") as contents: + with open( + os.path.join(dir, "volatility3.framework.plugins.rst"), "rb" + ) as contents: real_lines = contents.readlines() # Process real_lines for line_index in 
range(len(real_lines)): - if b'Submodules' in real_lines[line_index]: + if b"Submodules" in real_lines[line_index]: break else: line_index = len(real_lines) @@ -82,36 +97,52 @@ def setup(app): for line in lines: contents.write(line) for line in submodule_lines: - contents.write(line.replace(b'volatility3.framework.plugins', b'volatility3.plugins')) + contents.write( + line.replace( + b"volatility3.framework.plugins", b"volatility3.plugins" + ) + ) # Clear up the framework.plugins page - with open(os.path.join(os.path.dirname(__file__), 'volatility3.framework.plugins.rst'), "rb") as contents: + with open( + os.path.join(os.path.dirname(__file__), "volatility3.framework.plugins.rst"), + "rb", + ) as contents: real_lines = contents.readlines() - with open(os.path.join(os.path.dirname(__file__), 'volatility3.framework.plugins.rst'), "wb") as contents: + with open( + os.path.join(os.path.dirname(__file__), "volatility3.framework.plugins.rst"), + "wb", + ) as contents: for line in real_lines: - if b'volatility3.framework.plugins.' not in line: + if b"volatility3.framework.plugins." not in line: contents.write(line) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../..')) +sys.path.insert(0, os.path.abspath("../..")) from volatility3.framework import constants # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = '2.0' +needs_sphinx = "2.0" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', - 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.autosectionlabel' + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.napoleon", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.viewcode", + "sphinx.ext.autosectionlabel", ] autosectionlabel_prefix_document = True @@ -119,7 +150,7 @@ def setup(app): try: import sphinx_autodoc_typehints - extensions.append('sphinx_autodoc_typehints') + extensions.append("sphinx_autodoc_typehints") except ImportError: # If the autodoc typehints extension isn't available, carry on regardless pass @@ -128,17 +159,17 @@ def setup(app): # templates_path = ['tools/templates'] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'Volatility 3' -copyright = '2012-2022, Volatility Foundation' +project = "Volatility 3" +copyright = "2012-2022, Volatility Foundation" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -147,7 +178,7 @@ def setup(app): # The full version, including alpha/beta/rc tags. release = constants.PACKAGE_VERSION # The short X.Y version. -version = ".".join(release.split('.')[0:2]) +version = ".".join(release.split(".")[0:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
@@ -180,7 +211,7 @@ def setup(app): # show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] @@ -196,8 +227,8 @@ def setup(app): # html_theme = 'pydoctheme' # html_theme_options = {'collapsiblesidebar': True} # html_theme_path = ['tools'] -html_theme = 'sphinx_rtd_theme' -html_theme_options = {'logo_only': True} +html_theme = "sphinx_rtd_theme" +html_theme_options = {"logo_only": True} # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -216,17 +247,17 @@ def setup(app): # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = '_static/vol.png' +html_logo = "_static/vol.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -html_favicon = '_static/favicon.ico' +html_favicon = "_static/favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied @@ -275,17 +306,15 @@ def setup(app): # html_file_suffix = None # Output file base name for HTML help builder. -htmlhelp_basename = 'Volatilitydoc' +htmlhelp_basename = "Volatilitydoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # 'preamble': '', } @@ -294,7 +323,13 @@ def setup(app): # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - ('index', 'Volatility.tex', 'Volatility 3 Documentation', 'Volatility Foundation', 'manual'), + ( + "index", + "Volatility.tex", + "Volatility 3 Documentation", + "Volatility Foundation", + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of @@ -321,7 +356,15 @@ def setup(app): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [('vol-cli', 'volatility', 'Volatility 3 Documentation', ['Volatility Foundation'], 1)] +man_pages = [ + ( + "vol-cli", + "volatility", + "Volatility 3 Documentation", + ["Volatility Foundation"], + 1, + ) +] # If true, show URL addresses after external links. # man_show_urls = False @@ -332,8 +375,15 @@ def setup(app): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'Volatility', 'Volatility 3 Documentation', 'Volatility Foundation', 'Volatility', - 'Memory forensics framework.', 'Miscellaneous'), + ( + "index", + "Volatility", + "Volatility 3 Documentation", + "Volatility Foundation", + "Volatility", + "Memory forensics framework.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. 
@@ -349,10 +399,14 @@ def setup(app): # texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'http://docs.python.org/': None} +intersphinx_mapping = {"python": ("http://docs.python.org/", None)} # -- Autodoc options ------------------------------------------------------- # autodoc_member_order = 'groupwise' -autodoc_default_options = {'members': True, 'inherited-members': True, 'show-inheritance': True} -autoclass_content = 'both' +autodoc_default_options = { + "members": True, + "inherited-members": True, + "show-inheritance": True, +} +autoclass_content = "both" diff --git a/doc/source/getting-started-mac-tutorial.rst b/doc/source/getting-started-mac-tutorial.rst new file mode 100644 index 0000000000..42e58c0d58 --- /dev/null +++ b/doc/source/getting-started-mac-tutorial.rst @@ -0,0 +1,153 @@ +macOS Tutorial +============== + +This guide will give you a brief overview of how volatility3 works as well as a demonstration of several of the plugins available in the suite. + +Acquiring memory +---------------- + +Volatility3 does not provide the ability to acquire memory. The example below is an open-source tool; other commercial tools are also available. + +* `osxpmem `_ + + + +Procedure to create symbol tables for macOS +-------------------------------------------- + +To create a symbol table please refer to :ref:`symbol-tables:Mac or Linux symbol tables`. + +.. tip:: It may be possible to locate pre-made ISF files from the `download link `_ , + which is built and maintained by `volatilityfoundation `_. + After creating the file or downloading it from the link, place the file under the directory ``volatility3/symbols/``. + + +Listing plugins +--------------- + +The following is a sample of the macOS plugins available for volatility3; it is not complete and more plugins may +be added. For a complete reference, please see the volatility 3 :doc:`list of plugins `. +For plugin requests, please create an issue with a description of the requested plugin. + +.. code-block:: shell-session + + $ python3 vol.py --help | grep -i mac. | head -n 4 + mac.bash.Bash Recovers bash command history from memory. + mac.check_syscall.Check_syscall + mac.check_sysctl.Check_sysctl + mac.check_trap_table.Check_trap_table + +.. note:: Here the command is piped to grep and head in order to show just the start of the list of macOS plugins. + + +Using plugins +------------- + +The following is the syntax for running the volatility CLI. + +.. code-block:: shell-session + + $ python3 vol.py -f + + +Example +------- + +banners +~~~~~~~ + +In this example we will be using a memory dump from the Securinets CTF Quals 2019 Challenge called Contact_me. We will limit the discussion to memory forensics with volatility 3 and not extend it to other parts of the challenge. +Thanks go to `stuxnet `_ for providing this memory dump and `writeup `_. + + +.. 
code-block:: shell-session + + $ python3 vol.py -f contact_me banners.Banners + + Volatility 3 Framework 2.4.2 + + Progress: 100.00 PDB scanning finished + Offset Banner + + 0x4d2c7d0 Darwin Kernel Version 16.7.0: Thu Jun 15 17:36:27 PDT 2017; root:xnu-3789.70.16~2/RELEASE_X86_64 + 0xb42b180 Darwin Kernel Version 16.7.0: Thu Jun 15 17:36:27 PDT 2017; root:xnu-3789.70.16~2/RELEASE_X86_64 + 0xcda9100 Darwin Kernel Version 16.7.0: Thu Jun 15 17:36:27 PDT 2017; root:xnu-3789.70.16~2/RELEASE_X86_64 + 0x1275e7d0 Darwin Kernel Version 16.7.0: Thu Jun 15 17:36:27 PDT 2017; root:xnu-3789.70.16~2/RELEASE_X86_64 + 0x1284fba4 Darwin Kernel Version 16.7.0: Thu Jun 15 17:36:27 PDT 2017; root:xnu-3789.70.16~2/RELEASE_X86_64 + 0x34ad0180 Darwin Kernel Version 16.7.0: Thu Jun 15 17:36:27 PDT 2017; root:xnu-3789.70.16~2/RELEASE_X86_64 + + +The above command finds the memory dump's Darwin kernel version. Using this banner we can search for the needed ISF file. +If the ISF file cannot be found, follow the instructions in :ref:`getting-started-mac-tutorial:Procedure to create symbol tables for macOS`. After that, place the ISF file under the ``volatility3/symbols`` directory. + +mac.pslist +~~~~~~~~~~ + +.. code-block:: shell-session + + $ python3 vol.py -f contact_me mac.pslist.PsList + + Volatility 3 Framework 2.4.2 + Progress: 100.00 Stacking attempts finished + + PID PPID COMM + + 0 0 kernel_task + 1 0 launchd + 35 1 UserEventAgent + 38 1 kextd + 39 1 fseventsd + 37 1 uninstalld + 45 1 configd + 46 1 powerd + 52 1 logd + 58 1 warmd + ..... + +``mac.pslist`` lists the running processes along with their PIDs and PPIDs. + +mac.pstree +~~~~~~~~~~ + +.. code-block:: shell-session + + $ python3 vol.py -f contact_me mac.pstree.PsTree + Volatility 3 Framework 2.4.2 + Progress: 100.00 Stacking attempts finished + PID PPID COMM + + 35 1 UserEventAgent + 38 1 kextd + 39 1 fseventsd + 37 1 uninstalld + 204 1 softwareupdated + * 449 204 SoftwareUpdateCo + 337 1 system_installd + * 455 337 update_dyld_shar + +``mac.pstree`` displays the parent-child relationships between processes. + +mac.ifconfig +~~~~~~~~~~~~ + +.. code-block:: shell-session + + $ python3 vol.py -f contact_me mac.ifconfig.Ifconfig + + Volatility 3 Framework 2.4.2 + Progress: 100.00 Stacking attempts finished + Interface IP Address Mac Address Promiscuous + + lo0 False + lo0 127.0.0.1 False + lo0 ::1 False + lo0 fe80:1::1 False + gif0 False + stf0 False + en0 00:0C:29:89:8B:F0 00:0C:29:89:8B:F0 False + en0 fe80:4::10fb:c89d:217f:52ae 00:0C:29:89:8B:F0 False + en0 192.168.140.128 00:0C:29:89:8B:F0 False + utun0 False + utun0 fe80:5::2a95:bb15:87e3:977c False + +We can use the ``mac.ifconfig`` plugin to get information about the configuration of the network interfaces of the host under investigation. diff --git a/doc/source/index.rst b/doc/source/index.rst index 9b1d05858c..7f35e9bcb0 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -25,6 +25,7 @@ There is also some information to get you started quickly: :caption: Getting Started getting-started-linux-tutorial + getting-started-mac-tutorial getting-started-windows-tutorial diff --git a/doc/source/using-as-a-library.rst b/doc/source/using-as-a-library.rst index c63adcfc35..fb012f1ae9 100644 --- a/doc/source/using-as-a-library.rst +++ b/doc/source/using-as-a-library.rst @@ -67,9 +67,10 @@ return a dictionary of plugin names and the plugin classes. 
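As a minimal sketch (assuming the plugin modules import cleanly on the local installation), discovering the available plugins looks roughly like this:

.. code-block:: python

    import volatility3.plugins
    from volatility3 import framework

    # Make sure the plugin modules have been imported so that list_plugins() can see them
    framework.import_files(volatility3.plugins, True)

    # list_plugins() returns a dictionary mapping plugin names to plugin classes
    for name, plugin_class in framework.list_plugins().items():
        print(name, plugin_class)
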
Determine what configuration options a plugin requires ------------------------------------------------------ -For each plugin class, we can call the classmethod `requirements` on it, which will return a list of objects that -adhere to the :py:class:`~volatility3.framework.interfaces.configuration.RequirementInterface` method. The various -types of Requirement are split roughly in two, +For each plugin class, we can call the classmethod +:py:func:`~volatility3.framework.interfaces.configuration.ConfigurableInterface.get_requirements` on it, which will +return a list of objects that adhere to the :py:class:`~volatility3.framework.interfaces.configuration.RequirementInterface` +interface. The various types of Requirement are split roughly into two groups, :py:class:`~volatility3.framework.interfaces.configuration.SimpleTypeRequirement` (such as integers, booleans, floats and strings) and more complex requirements (such as lists, choices, multiple requirements, translation layer requirements or symbol table requirements). A requirement just specifies a type of data and a name, and must be diff --git a/volatility3/__init__.py b/volatility3/__init__.py index 28df7da5aa..94a6721e13 100644 --- a/volatility3/__init__.py +++ b/volatility3/__init__.py @@ -38,10 +38,13 @@ def find_spec( """Mock find_spec method that just checks the name, this must go first.""" if fullname.startswith("volatility3.framework.plugins."): - warning = "Please do not use the volatility3.framework.plugins namespace directly, only use volatility3.plugins" + warning = f"Import {fullname}: Please do not use the volatility3.framework.plugins namespace directly, only use volatility3.plugins" # Pyinstaller uses walk_packages/_collect_submodules to import, but needs to read the modules to figure out dependencies # As such, we only print the warning when directly imported rather than from within walk_packages/_collect_submodules - if inspect.stack()[-2].function in ["walk_packages", "_collect_submodules"]: + if inspect.stack()[-2].function not in [ + "walk_packages", + "_collect_submodules", + ] and inspect.stack()[-3].function not in ["_collect_submodules"]: raise Warning(warning) diff --git a/volatility3/cli/__init__.py b/volatility3/cli/__init__.py index 336902d501..9bfd14c6c6 100644 --- a/volatility3/cli/__init__.py +++ b/volatility3/cli/__init__.py @@ -353,7 +353,9 @@ def run(self): ### if args.file: try: - single_location = self.location_from_file(args.file) + single_location = requirements.URIRequirement.location_from_file( + args.file + ) ctx.config["automagic.LayerStacker.single_location"] = single_location except ValueError as excp: parser.error(str(excp)) @@ -456,19 +458,10 @@ def location_from_file(cls, filename: str) -> str: Returns: The URL for the location of the file """ - # We want to work in URLs, but we need to accept absolute and relative files (including on windows) - single_location = parse.urlparse(filename, "") - if single_location.scheme == "" or len(single_location.scheme) == 1: - single_location = parse.urlparse( - parse.urljoin("file:", request.pathname2url(os.path.abspath(filename))) - ) - if single_location.scheme == "file": - if not os.path.exists(request.url2pathname(single_location.path)): - filename = request.url2pathname(single_location.path) - if not filename: - raise ValueError("File URL looks incorrect (potentially missing /)") - raise ValueError(f"File does not exist: {filename}") - return parse.urlunparse(single_location) + vollog.debug( + f"{__name__}.location_from_file has been deprecated and moved to 
requirements.URIRequirement.location_from_file" + ) + return requirements.URIRequirement.location_from_file(filename) def process_exceptions(self, excp): """Provide useful feedback if an exception occurs during a run of a plugin.""" diff --git a/volatility3/cli/volshell/generic.py b/volatility3/cli/volshell/generic.py index df369c53eb..ea9e65d9b5 100644 --- a/volatility3/cli/volshell/generic.py +++ b/volatility3/cli/volshell/generic.py @@ -480,7 +480,7 @@ def run_script(self, location: str): accessor = resources.ResourceAccessor() with accessor.open(url=location) as fp: self.__console.runsource( - io.TextIOWrapper(fp.read(), encoding="utf-8"), symbol="exec" + io.TextIOWrapper(fp, encoding="utf-8").read(), symbol="exec" ) print("\nCode complete") diff --git a/volatility3/framework/__init__.py b/volatility3/framework/__init__.py index ec0edc2fe1..9c17846a80 100644 --- a/volatility3/framework/__init__.py +++ b/volatility3/framework/__init__.py @@ -26,6 +26,7 @@ import inspect import logging import os +import traceback from typing import Any, Dict, Generator, List, Tuple, Type, TypeVar from volatility3.framework import constants, interfaces @@ -183,7 +184,11 @@ def import_file(module: str, path: str, ignore_errors: bool = False) -> List[str try: importlib.import_module(module) except ImportError as e: - vollog.debug(str(e)) + vollog.debug( + "".join( + traceback.TracebackException.from_exception(e).format(chain=True) + ) + ) vollog.debug( "Failed to import module {} based on file: {}".format(module, path) ) @@ -219,8 +224,7 @@ def list_plugins() -> Dict[str, Type[interfaces.plugins.PluginInterface]]: def clear_cache(complete=False): - glob_pattern = "*.cache" - if not complete: - glob_pattern = "data_" + glob_pattern - for cache_filename in glob.glob(os.path.join(constants.CACHE_PATH, glob_pattern)): - os.unlink(cache_filename) + try: + os.unlink(os.path.join(constants.CACHE_PATH, constants.IDENTIFIERS_FILENAME)) + except FileNotFoundError: + vollog.log(constants.LOGLEVEL_VVVV, "Attempting to clear a non-existent cache") diff --git a/volatility3/framework/automagic/symbol_cache.py b/volatility3/framework/automagic/symbol_cache.py index a58bf00912..29f2cfd08b 100644 --- a/volatility3/framework/automagic/symbol_cache.py +++ b/volatility3/framework/automagic/symbol_cache.py @@ -310,6 +310,14 @@ def update(self, progress_callback=None): new_locations = on_disk_locations.difference(cached_locations) missing_locations = cached_locations.difference(on_disk_locations) + # Missing entries + if missing_locations: + self._database.cursor().execute( + f"DELETE FROM cache WHERE location IN ({','.join(['?'] * len(missing_locations))})", + [x for x in missing_locations], + ) + self._database.commit() + cache_update = set() files_to_timestamp = on_disk_locations.intersection(cached_locations) if files_to_timestamp: @@ -437,15 +445,6 @@ def update(self, progress_callback=None): progress_callback(100, "Reading remote ISF list") self._database.commit() - # Missing entries - - if missing_locations: - self._database.cursor().execute( - f"DELETE FROM cache WHERE location IN ({','.join(['?'] * len(missing_locations))})", - [x for x in missing_locations], - ) - self._database.commit() - def get_identifier_dictionary( self, operating_system: Optional[str] = None, local_only: bool = False ) -> Dict[bytes, str]: diff --git a/volatility3/framework/automagic/windows.py b/volatility3/framework/automagic/windows.py index 986eeae229..a8530829ba 100644 --- a/volatility3/framework/automagic/windows.py +++ 
b/volatility3/framework/automagic/windows.py @@ -367,6 +367,7 @@ def __call__( progress_callback: constants.ProgressCallback = None, ) -> None: """Finds translation layers that can have swap layers added.""" + path_join = interfaces.configuration.path_join self._translation_requirement = self.find_requirements( context, @@ -382,11 +383,13 @@ def __call__( swap_sub_config, swap_req = self.find_swap_requirement( trans_sub_config, trans_req ) + counter = 0 swap_config = interfaces.configuration.parent_path(swap_sub_config) if swap_req and swap_req.unsatisfied(context, swap_config): # See if any of them need constructing + for swap_location in self.config.get("single_swap_locations", []): # Setup config locations/paths current_layer_name = swap_req.name + str(counter) @@ -398,7 +401,17 @@ def __call__( # Fill in the config if swap_location: context.config[current_layer_path] = current_layer_name - context.config[layer_loc_path] = swap_location + try: + context.config[ + layer_loc_path + ] = requirements.URIRequirement.location_from_file( + swap_location + ) + except ValueError: + vollog.warning( + f"Volatility swap_location {swap_location} could not be validated - swap layer disabled" + ) + continue context.config[ layer_class_path ] = "volatility3.framework.layers.physical.FileLayer" diff --git a/volatility3/framework/configuration/requirements.py b/volatility3/framework/configuration/requirements.py index 6b64b1cb95..abdffdbe49 100644 --- a/volatility3/framework/configuration/requirements.py +++ b/volatility3/framework/configuration/requirements.py @@ -10,7 +10,9 @@ """ import abc import logging +import os from typing import Any, ClassVar, Dict, List, Optional, Tuple, Type +from urllib import parse, request from volatility3.framework import constants, interfaces @@ -55,6 +57,30 @@ class URIRequirement(StringRequirement): # TODO: Maybe a a check that to unsatisfied that the path really is a URL? 
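# Illustrative sketch (hypothetical paths): the classmethod added below is expected to leave full URLs
# untouched and to convert local paths into file:// URLs, for example:
#   URIRequirement.location_from_file("http://example.org/memory.lime") -> "http://example.org/memory.lime"
#   URIRequirement.location_from_file("/cases/memory.lime") -> "file:///cases/memory.lime"
#   (provided the local file exists; otherwise a ValueError is raised)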
+ @classmethod + def location_from_file(cls, filename: str) -> str: + """Returns the URL location from a file parameter (which may be a URL) + + Args: + filename: The path to the file (either an absolute, relative, or URL path) + + Returns: + The URL for the location of the file + """ + # We want to work in URLs, but we need to accept absolute and relative files (including on windows) + single_location = parse.urlparse(filename, "") + if single_location.scheme == "" or len(single_location.scheme) == 1: + single_location = parse.urlparse( + parse.urljoin("file:", request.pathname2url(os.path.abspath(filename))) + ) + if single_location.scheme == "file": + if not os.path.exists(request.url2pathname(single_location.path)): + filename = request.url2pathname(single_location.path) + if not filename: + raise ValueError("File URL looks incorrect (potentially missing /)") + raise ValueError(f"File does not exist: {filename}") + return parse.urlunparse(single_location) + class BytesRequirement(interfaces.configuration.SimpleTypeRequirement): """A requirement type that contains a byte string.""" diff --git a/volatility3/framework/constants/__init__.py b/volatility3/framework/constants/__init__.py index 8f1163fe13..de1674885f 100644 --- a/volatility3/framework/constants/__init__.py +++ b/volatility3/framework/constants/__init__.py @@ -44,8 +44,8 @@ # We use the SemVer 2.0.0 versioning scheme VERSION_MAJOR = 2 # Number of releases of the library with a breaking change -VERSION_MINOR = 4 # Number of changes that only add to the interface -VERSION_PATCH = 1 # Number of changes that do not change the interface +VERSION_MINOR = 5 # Number of changes that only add to the interface +VERSION_PATCH = 0 # Number of changes that do not change the interface VERSION_SUFFIX = "" # TODO: At version 2.0.0, remove the symbol_shift feature diff --git a/volatility3/framework/constants/linux/__init__.py b/volatility3/framework/constants/linux/__init__.py index 1b133eb42b..a802e0adaf 100644 --- a/volatility3/framework/constants/linux/__init__.py +++ b/volatility3/framework/constants/linux/__init__.py @@ -234,3 +234,48 @@ "HIDP", "AVDTP", ) + +# Ref: include/uapi/linux/capability.h +CAPABILITIES = ( + "chown", + "dac_override", + "dac_read_search", + "fowner", + "fsetid", + "kill", + "setgid", + "setuid", + "setpcap", + "linux_immutable", + "net_bind_service", + "net_broadcast", + "net_admin", + "net_raw", + "ipc_lock", + "ipc_owner", + "sys_module", + "sys_rawio", + "sys_chroot", + "sys_ptrace", + "sys_pacct", + "sys_admin", + "sys_boot", + "sys_nice", + "sys_resource", + "sys_time", + "sys_tty_config", + "mknod", + "lease", + "audit_write", + "audit_control", + "setfcap", + "mac_override", + "mac_admin", + "syslog", + "wake_alarm", + "block_suspend", + "audit_read", + "perfmon", + "bpf", + "checkpoint_restore", +) diff --git a/volatility3/framework/contexts/__init__.py b/volatility3/framework/contexts/__init__.py index 226a303dd1..73868a58f3 100644 --- a/volatility3/framework/contexts/__init__.py +++ b/volatility3/framework/contexts/__init__.py @@ -272,6 +272,7 @@ def object_from_symbol( symbol_name: str, native_layer_name: Optional[str] = None, absolute: bool = False, + object_type: Optional[Union[str, "interfaces.objects.ObjectInterface"]] = None, **kwargs, ) -> "interfaces.objects.ObjectInterface": """Returns an object based on a specific symbol (containing type and @@ -284,6 +285,7 @@ def object_from_symbol( symbol_name: Name of the symbol (within the module) to construct native_layer_name: Name of the layer in which 
constructed objects are made (for pointers) absolute: whether the symbol's address is absolute or relative to the module + object_type: Override for the symbol's type (or the type to use if the symbol has no type) """ if constants.BANG not in symbol_name: symbol_name = self.symbol_table_name + constants.BANG + symbol_name @@ -299,8 +301,13 @@ def object_from_symbol( if not absolute: offset += self._offset - if symbol_val.type is None: - raise TypeError(f"Symbol {symbol_val.name} has no associated type") + if object_type is None: + if symbol_val.type is None: + raise TypeError( + f"Symbol {symbol_val.name} has no associated type and no object_type specified" + ) + else: + object_type = symbol_val.type # Ensure we don't use a layer_name other than the module's, why would anyone do that? if "layer_name" in kwargs: @@ -308,7 +315,7 @@ def object_from_symbol( # Since type may be a template, we don't just call our own module method return self._context.object( - object_type=symbol_val.type, + object_type=object_type, layer_name=self._layer_name, offset=offset, native_layer_name=native_layer_name or self._native_layer_name, @@ -381,6 +388,7 @@ class ModuleCollection(interfaces.context.ModuleContainer): def __init__( self, modules: Optional[List[interfaces.context.ModuleInterface]] = None ) -> None: + self._prefix_count = {} super().__init__(modules) def deduplicate(self) -> "ModuleCollection": @@ -400,9 +408,13 @@ def deduplicate(self) -> "ModuleCollection": def free_module_name(self, prefix: str = "module") -> str: """Returns an unused module name""" - count = 1 + if prefix not in self._prefix_count: + self._prefix_count[prefix] = 1 + return prefix + count = self._prefix_count[prefix] while prefix + str(count) in self: count += 1 + self._prefix_count[prefix] = count return prefix + str(count) @property diff --git a/volatility3/framework/interfaces/context.py b/volatility3/framework/interfaces/context.py index 7e385746d5..29cb41379c 100644 --- a/volatility3/framework/interfaces/context.py +++ b/volatility3/framework/interfaces/context.py @@ -253,6 +253,7 @@ def object_from_symbol( symbol_name: str, native_layer_name: Optional[str] = None, absolute: bool = False, + object_type: Optional[Union[str, "interfaces.objects.ObjectInterface"]] = None, **kwargs, ) -> "interfaces.objects.ObjectInterface": """Returns an object created using the symbol_table_name and layer_name Args: symbol_name: The name of a symbol (that must be present in the module's symbol table). The symbol's associated type will be used to construct an object at the symbol's offset. 
native_layer_name: The native layer for objects that reference a different layer (if not the default provided during module construction) absolute: A boolean specifying whether the offset is absolute within the layer, or relative to the start of the module + object_type: Override for the symbol's type (or the type to use if the symbol has no type) Returns: The constructed object diff --git a/volatility3/framework/layers/avml.py b/volatility3/framework/layers/avml.py index c9c682ac4d..c825464ccc 100644 --- a/volatility3/framework/layers/avml.py +++ b/volatility3/framework/layers/avml.py @@ -19,11 +19,18 @@ try: # TODO: Find library for windows if needed try: - # Linux/Mac + # Linux lib_snappy = ctypes.cdll.LoadLibrary("libsnappy.so.1") except OSError: lib_snappy = None + try: + if not lib_snappy: + # macOS + lib_snappy = ctypes.cdll.LoadLibrary("libsnappy.1.dylib") + except OSError: + lib_snappy = None + try: if not lib_snappy: # Windows 64 @@ -31,7 +38,7 @@ except OSError: lib_snappy = None - if lib_snappy: + if not lib_snappy: # Windows 32 lib_snappy = ctypes.cdll.LoadLibrary("snappy32") diff --git a/volatility3/framework/layers/intel.py b/volatility3/framework/layers/intel.py index 478eb168f0..046203fa68 100644 --- a/volatility3/framework/layers/intel.py +++ b/volatility3/framework/layers/intel.py @@ -111,6 +111,11 @@ def _page_is_valid(entry: int) -> bool: """Returns whether a particular page is valid based on its entry.""" return bool(entry & 1) + @staticmethod + def _page_is_dirty(entry: int) -> bool: + """Returns whether a particular page is dirty based on its entry.""" + return bool(entry & (1 << 6)) + def canonicalize(self, addr: int) -> int: """Canonicalizes an address by performing an appropiate sign extension on the higher addresses""" if self._bits_per_register <= self._maxvirtaddr: @@ -259,6 +264,10 @@ def is_valid(self, offset: int, length: int = 1) -> bool: except exceptions.InvalidAddressException: return False + def is_dirty(self, offset: int) -> bool: + """Returns whether the page at offset is marked dirty""" + return self._page_is_dirty(self._translate_entry(offset)[0]) + def mapping( self, offset: int, length: int, ignore_errors: bool = False ) -> Iterable[Tuple[int, int, int, int, str]]: diff --git a/volatility3/framework/layers/xen.py b/volatility3/framework/layers/xen.py new file mode 100644 index 0000000000..f7881a0911 --- /dev/null +++ b/volatility3/framework/layers/xen.py @@ -0,0 +1,180 @@ +import logging +import struct +from typing import Optional + +from volatility3.framework import constants, interfaces, exceptions +from volatility3.framework.layers import elf +from volatility3.framework.symbols import intermed + +vollog = logging.getLogger(__name__) + + +class XenCoreDumpLayer(elf.Elf64Layer): + """A layer that supports the Xen Dump-Core format as documented at: https://xenbits.xen.org/docs/4.6-testing/misc/dump-core-format.txt""" + + _header_struct = struct.Struct(" None: + # Create a custom SymbolSpace + self._elf_table_name = intermed.IntermediateSymbolTable.create( + context, config_path, "linux", "elf" + ) + self._xen_table_name = intermed.IntermediateSymbolTable.create( + context, config_path, "linux", "xen" + ) + self._segment_headers = {} + + super().__init__(context, config_path, name) + + def _extract_result_array( + self, varname: str, segment_index: int + ) -> interfaces.objects.ObjectInterface: + hdr = self._segment_headers[segment_index] + result = self.context.object( + self._xen_table_name + constants.BANG + varname, 
layer_name=self._base_layer, + offset=hdr.sh_offset, + size=hdr.sh_size, + ) + result.entries.count = hdr.sh_size // result.entries.vol.subtype.size + return result + + def _load_segments(self) -> None: + """Load the segments from based on the PT_LOAD segments of the Elf64 format""" + ehdr = self.context.object( + self._elf_table_name + constants.BANG + "Elf64_Ehdr", + layer_name=self._base_layer, + offset=0, + ) + + segments = [] + self._segment_headers = [] + + for sindex in range(ehdr.e_shnum): + shdr = self.context.object( + self._elf_table_name + constants.BANG + "Elf64_Shdr", + layer_name=self._base_layer, + offset=ehdr.e_shoff + (sindex * ehdr.e_shentsize), + ) + + self._segment_headers.append(shdr) + + if sindex == ehdr.e_shstrndx: + segment_names = self.context.layers[self._base_layer].read( + shdr.sh_offset, shdr.sh_size + ) + segment_names = segment_names.split(b"\x00") + + if not segment_names: + raise elf.ElfFormatException("No segment names, not a Xen Core Dump") + + try: + p2m_data = self._extract_result_array( + "xen_p2m", segment_names.index(b".xen_p2m") + ) + except ValueError: + p2m_data = None + try: + pfn_data = self._extract_result_array( + "xen_pfn", segment_names.index(b".xen_pfn") + ) + except ValueError: + pfn_data = None + + pages_hdr = self._segment_headers[segment_names.index(b".xen_pages")] + page_size = 0x1000 + + if pfn_data and not p2m_data: + for entry_index in range(len(pfn_data.entries)): + entry = pfn_data.entries[entry_index] + # TODO: Don't hardcode the maximum value here + if entry and entry != 0xFFFFFFFF: + segments.append( + ( + entry * page_size, + pages_hdr.sh_offset + (entry_index * page_size), + page_size, + page_size, + ) + ) + elif p2m_data and not pfn_data: + for entry_index in range(len(p2m_data.entries)): + entry = p2m_data.entries[entry_index] + # TODO: Don't hardcode the maximum value here + if entry.pfn != 0xFFFFFFFF: + segments.append( + ( + entry.pfn * page_size, + pages_hdr.sh_offset + (entry_index * page_size), + page_size, + page_size, + ) + ) + elif p2m_data and pfn_data: + raise elf.ElfFormatException( + self.name, f"Both P2M and PFN in Xen Core Dump" + ) + else: + raise elf.ElfFormatException( + self.name, f"Neither P2M nor PFN in Xen Core Dump" + ) + + if len(segments) == 0: + raise elf.ElfFormatException( + self.name, f"No ELF segments defined in {self._base_layer}" + ) + + self._segments = segments + + @classmethod + def _check_header( + cls, base_layer: interfaces.layers.DataLayerInterface, offset: int = 0 + ) -> bool: + try: + header_data = base_layer.read(offset, cls._header_struct.size) + except exceptions.InvalidAddressException: + raise elf.ElfFormatException( + base_layer.name, + f"Offset 0x{offset:0x} does not exist within the base layer", + ) + (magic, elf_class, elf_data_encoding, elf_version) = cls._header_struct.unpack( + header_data + ) + if magic != cls.MAGIC: + raise elf.ElfFormatException( + base_layer.name, f"Bad magic 0x{magic:x} at file offset 0x{offset:x}" + ) + if elf_class != cls.ELF_CLASS: + raise elf.ElfFormatException( + base_layer.name, f"ELF class is not 64-bit (2): {elf_class:d}" + ) + # Virtualbox uses an ELF version of 0, which isn't to specification, but is ok to deal with + return True + + +class XenCoreDumpStacker(elf.Elf64Stacker): + stack_order = 10 + + @classmethod + def stack( + cls, + context: interfaces.context.ContextInterface, + layer_name: str, + progress_callback: constants.ProgressCallback = None, + ) -> Optional[interfaces.layers.DataLayerInterface]: + try: + if not 
XenCoreDumpLayer._check_header(context.layers[layer_name]): + return None + except elf.ElfFormatException as excp: + vollog.log(constants.LOGLEVEL_VVVV, f"Exception: {excp}") + return None + new_name = context.layers.free_layer_name("XenCoreDumpLayer") + context.config[ + interfaces.configuration.path_join(new_name, "base_layer") + ] = layer_name + + return XenCoreDumpLayer(context, new_name, new_name) diff --git a/volatility3/framework/plugins/linux/capabilities.py b/volatility3/framework/plugins/linux/capabilities.py new file mode 100644 index 0000000000..518f526039 --- /dev/null +++ b/volatility3/framework/plugins/linux/capabilities.py @@ -0,0 +1,208 @@ +# This file is Copyright 2023 Volatility Foundation and licensed under the Volatility Software License 1.0 +# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0 +# + +import logging +from dataclasses import dataclass, astuple, fields +from typing import Iterable, List, Tuple + +from volatility3.framework import interfaces, renderers, exceptions +from volatility3.framework.configuration import requirements +from volatility3.framework.interfaces import plugins +from volatility3.framework.objects import utility +from volatility3.framework.symbols.linux import extensions +from volatility3.plugins.linux import pslist + +vollog = logging.getLogger(__name__) + + +@dataclass +class TaskData: + """Stores basic information about a task""" + + comm: str + pid: int + tgid: int + ppid: int + euid: int + + +@dataclass +class CapabilitiesData: + """Stores each set of capabilities for a task""" + + cap_inheritable: interfaces.objects.ObjectInterface + cap_permitted: interfaces.objects.ObjectInterface + cap_effective: interfaces.objects.ObjectInterface + cap_bset: interfaces.objects.ObjectInterface + cap_ambient: interfaces.objects.ObjectInterface + + def astuple(self) -> Tuple: + """Returns a shallow copy of the capability sets in a tuple. + + Otherwise, dataclasses.astuple() would perform a deep-copy recursion on each + ObjectInterface, which takes a substantial amount of time. + """ + return tuple(getattr(self, field.name) for field in fields(self)) + + +class Capabilities(plugins.PluginInterface): + """Lists process capabilities""" + + _required_framework_version = (2, 0, 0) + + _version = (1, 0, 0) + + @classmethod + def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]: + return [ + requirements.ModuleRequirement( + name="kernel", + description="Linux kernel", + architectures=["Intel32", "Intel64"], + ), + requirements.PluginRequirement( + name="pslist", plugin=pslist.PsList, version=(2, 0, 0) + ), + requirements.ListRequirement( + name="pids", + description="Filter on specific process IDs.", + element_type=int, + optional=True, + ), + ] + + def _check_capabilities_support( + self, + context: interfaces.context.ContextInterface, + vmlinux_module_name: str, + ): + """Checks that the framework supports at least as many capabilities as + the kernel being analysed. Otherwise, it shows a warning for the + developers. 
+ """ + + vmlinux = context.modules[vmlinux_module_name] + + try: + kernel_cap_last_cap = vmlinux.object_from_symbol(symbol_name="cap_last_cap") + except exceptions.SymbolError: + # It should be a kernel < 3.2 + return + + vol2_last_cap = extensions.kernel_cap_struct.get_last_cap_value() + if kernel_cap_last_cap > vol2_last_cap: + vollog.warning( + "Developers: The supported Linux capabilities of this plugin are outdated for this kernel" + ) + + @staticmethod + def _decode_cap(cap: interfaces.objects.ObjectInterface) -> str: + """Returns a textual representation of the capability set. + The format is a comma-separated list of capabilitites. In order to + summarize the output and if all the capabilities are enabled, instead of + the individual capabilities, the special name "all" will be shown. + + Args: + cap: Kernel capability object. Usually a 'kernel_cap_struct' struct + + Returns: + str: A string with a comma separated list of decoded capabilities + """ + if isinstance(cap, renderers.NotAvailableValue): + return cap + + cap_value = cap.get_capabilities() + if not cap_value: + return "" + + if cap_value == cap.get_kernel_cap_full(): + return "all" + + return ", ".join(cap.enumerate_capabilities()) + + @classmethod + def get_task_capabilities( + cls, task: interfaces.objects.ObjectInterface + ) -> Tuple[TaskData, CapabilitiesData]: + """Returns a tuple with the task basic information along with its capabilities + + Args: + task: A task object from where to get the fields. + + Returns: + A tuple with the task basic information and its capabilities + """ + task_data = TaskData( + comm=utility.array_to_string(task.comm), + pid=int(task.pid), + tgid=int(task.tgid), + ppid=int(task.parent.pid), + euid=int(task.cred.euid), + ) + + task_cred = task.real_cred + capabilities_data = CapabilitiesData( + cap_inheritable=task_cred.cap_inheritable, + cap_permitted=task_cred.cap_permitted, + cap_effective=task_cred.cap_effective, + cap_bset=task_cred.cap_bset, + cap_ambient=renderers.NotAvailableValue(), + ) + + # Ambient capabilities were added in kernels 4.3.6 + if task_cred.has_member("cap_ambient"): + capabilities_data.cap_ambient = task_cred.cap_ambient + + return task_data, capabilities_data + + @classmethod + def get_tasks_capabilities( + cls, tasks: List[interfaces.objects.ObjectInterface] + ) -> Iterable[Tuple[TaskData, CapabilitiesData]]: + """Yields a tuple for each task containing the task's basic information along with its capabilities + + Args: + tasks: An iterable with the tasks to process. 
+ + Yields: + A tuple for each task containing the task's basic information and its capabilities + """ + for task in tasks: + yield cls.get_task_capabilities(task) + + def _generator( + self, tasks: Iterable[interfaces.objects.ObjectInterface] + ) -> Iterable[Tuple[int, Tuple]]: + for task_fields, capabilities_fields in self.get_tasks_capabilities(tasks): + task_fields = astuple(task_fields) + + capabilities_text = tuple( + self._decode_cap(cap) for cap in capabilities_fields.astuple() + ) + + yield 0, task_fields + capabilities_text + + def run(self): + self._check_capabilities_support(self.context, self.config["kernel"]) + + pids = self.config.get("pids") + pid_filter = pslist.PsList.create_pid_filter(pids) + tasks = pslist.PsList.list_tasks( + self.context, self.config["kernel"], filter_func=pid_filter + ) + + columns = [ + ("Name", str), + ("Tid", int), + ("Pid", int), + ("PPid", int), + ("EUID", int), + ("cap_inheritable", str), + ("cap_permitted", str), + ("cap_effective", str), + ("cap_bounding", str), + ("cap_ambient", str), + ] + + return renderers.TreeGrid(columns, self._generator(tasks)) diff --git a/volatility3/framework/plugins/linux/elfs.py b/volatility3/framework/plugins/linux/elfs.py index 822a69dd6b..2ff5eb591e 100644 --- a/volatility3/framework/plugins/linux/elfs.py +++ b/volatility3/framework/plugins/linux/elfs.py @@ -48,7 +48,7 @@ def _generator(self, tasks): name = utility.array_to_string(task.comm) - for vma in task.mm.get_mmap_iter(): + for vma in task.mm.get_vma_iter(): hdr = proc_layer.read(vma.vm_start, 4, pad=True) if not ( hdr[0] == 0x7F diff --git a/volatility3/framework/plugins/linux/iomem.py b/volatility3/framework/plugins/linux/iomem.py index 8efbf3b578..785405ef3c 100644 --- a/volatility3/framework/plugins/linux/iomem.py +++ b/volatility3/framework/plugins/linux/iomem.py @@ -42,7 +42,7 @@ def parse_resource( Args: context: The context to retrieve required elements (layers, symbol tables) from vmlinux_module_name: The name of the kernel module on which to operate - resource_offset: The offset to the resouce to be parsed + resource_offset: The offset to the resource to be parsed seen: The set of resource offsets that have already been parsed depth: How deep into the resource structure we are @@ -57,7 +57,7 @@ def parse_resource( except exceptions.InvalidAddressException: vollog.warning( f"Unable to create resource object at {resource_offset:#x}. This resource, " - "its sibling, and any of it's childern and will be missing from the output." + "its sibling, and any of it's children and will be missing from the output." 
) return None @@ -66,7 +66,7 @@ def parse_resource( name = utility.pointer_to_string(resource.name, 128) except exceptions.InvalidAddressException: vollog.warning( - "Unable to follow pointer to name for resource object at {resource_offset:#x}, " + f"Unable to follow pointer to name for resource object at {resource_offset:#x}, " "replaced with UnreadableValue" ) name = renderers.UnreadableValue() diff --git a/volatility3/framework/plugins/linux/malfind.py b/volatility3/framework/plugins/linux/malfind.py index 18237b80c9..8a21afc03f 100644 --- a/volatility3/framework/plugins/linux/malfind.py +++ b/volatility3/framework/plugins/linux/malfind.py @@ -3,7 +3,7 @@ # from typing import List - +import logging from volatility3.framework import constants, interfaces from volatility3.framework import renderers from volatility3.framework.configuration import requirements @@ -11,6 +11,8 @@ from volatility3.framework.renderers import format_hints from volatility3.plugins.linux import pslist +vollog = logging.getLogger(__name__) + class Malfind(interfaces.plugins.PluginInterface): """Lists process memory ranges that potentially contain injected code.""" @@ -46,8 +48,15 @@ def _list_injections(self, task): proc_layer = self.context.layers[proc_layer_name] - for vma in task.mm.get_mmap_iter(): - if vma.is_suspicious() and vma.get_name(self.context, task) != "[vdso]": + for vma in task.mm.get_vma_iter(): + vma_name = vma.get_name(self.context, task) + vollog.debug( + f"Injections : processing PID {task.pid} : VMA {vma_name} : {hex(vma.vm_start)}-{hex(vma.vm_end)}" + ) + if ( + vma.is_suspicious(proc_layer) + and vma.get_name(self.context, task) != "[vdso]" + ): data = proc_layer.read(vma.vm_start, 64, pad=True) yield vma, data diff --git a/volatility3/framework/plugins/linux/mountinfo.py b/volatility3/framework/plugins/linux/mountinfo.py index ebd6e55a0f..da743bb601 100644 --- a/volatility3/framework/plugins/linux/mountinfo.py +++ b/volatility3/framework/plugins/linux/mountinfo.py @@ -9,8 +9,10 @@ from volatility3.framework import renderers, interfaces from volatility3.framework.configuration import requirements from volatility3.framework.interfaces import plugins +from volatility3.framework.symbols import linux from volatility3.plugins.linux import pslist + vollog = logging.getLogger(__name__) MountInfoData = namedtuple( @@ -48,6 +50,9 @@ def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface] requirements.PluginRequirement( name="pslist", plugin=pslist.PsList, version=(2, 0, 0) ), + requirements.VersionRequirement( + name="linuxutils", component=linux.LinuxUtilities, version=(2, 1, 0) + ), requirements.ListRequirement( name="pids", description="Filter on specific process IDs.", @@ -71,37 +76,6 @@ def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface] ), ] - @classmethod - def _do_get_path(cls, mnt, fs_root) -> Union[None, str]: - """It mimics the Linux kernel prepend_path function.""" - vfsmnt = mnt.mnt - dentry = vfsmnt.get_mnt_root() - - path_reversed = [] - while dentry != fs_root.dentry or vfsmnt.vol.offset != fs_root.mnt: - if dentry == vfsmnt.get_mnt_root() or dentry.is_root(): - parent = mnt.get_mnt_parent().dereference() - # Escaped? - if dentry != vfsmnt.get_mnt_root(): - return None - - # Global root? 
- if mnt.vol.offset != parent.vol.offset: - dentry = mnt.get_mnt_mountpoint() - mnt = parent - vfsmnt = mnt.mnt - continue - - return None - - parent = dentry.d_parent - dname = dentry.d_name.name_as_str() - path_reversed.append(dname.strip("/")) - dentry = parent - - path = "/" + "/".join(reversed(path_reversed)) - return path - @classmethod def get_mountinfo( cls, mnt, task @@ -115,8 +89,8 @@ def get_mountinfo( if not mnt_root: return None - path_root = cls._do_get_path(mnt, task.fs.root) - if path_root is None: + path_root = linux.LinuxUtilities.get_path_mnt(task, mnt) + if not path_root: return None mnt_root_path = mnt_root.path() @@ -170,9 +144,11 @@ def get_mountinfo( ) def _get_tasks_mountpoints( - self, tasks: Iterable[interfaces.objects.ObjectInterface], per_namespace: bool + self, + tasks: Iterable[interfaces.objects.ObjectInterface], + filtered_by_pids: bool, ): - seen_namespaces = set() + seen_mountpoints = set() for task in tasks: if not ( task @@ -181,19 +157,27 @@ def _get_tasks_mountpoints( and task.nsproxy and task.nsproxy.mnt_ns ): - # This task doesn't have all the information required + # This task doesn't have all the information required. + # It should be a kernel < 2.6.30 continue mnt_namespace = task.nsproxy.mnt_ns - mnt_ns_id = mnt_namespace.get_inode() - - if per_namespace: - if mnt_ns_id in seen_namespaces: - continue - else: - seen_namespaces.add(mnt_ns_id) + try: + mnt_ns_id = mnt_namespace.get_inode() + except AttributeError: + mnt_ns_id = renderers.NotAvailableValue() for mount in mnt_namespace.get_mount_points(): + # When PIDs are filtered, it makes sense that the user want to + # see each of those processes mount points. So we don't filter + # by mount id in this case. + if not filtered_by_pids: + mnt_id = int(mount.mnt_id) + if mnt_id in seen_mountpoints: + continue + else: + seen_mountpoints.add(mnt_id) + yield task, mount, mnt_ns_id def _generator( @@ -201,10 +185,20 @@ def _generator( tasks: Iterable[interfaces.objects.ObjectInterface], mnt_ns_ids: List[int], mount_format: bool, - per_namespace: bool, + filtered_by_pids: bool, ) -> Iterable[Tuple[int, Tuple]]: - for task, mnt, mnt_ns_id in self._get_tasks_mountpoints(tasks, per_namespace): - if mnt_ns_ids and mnt_ns_id not in mnt_ns_ids: + show_filter_warning = False + for task, mnt, mnt_ns_id in self._get_tasks_mountpoints( + tasks, filtered_by_pids + ): + if mnt_ns_ids and isinstance(mnt_ns_id, renderers.NotAvailableValue): + show_filter_warning = True + + if ( + not isinstance(mnt_ns_id, renderers.NotAvailableValue) + and mnt_ns_ids + and mnt_ns_id not in mnt_ns_ids + ): continue mnt_info = self.get_mountinfo(mnt, task) @@ -242,12 +236,17 @@ def _generator( ] fields_values = [mnt_ns_id] - if not per_namespace: + if filtered_by_pids: fields_values.append(task.pid) fields_values.extend(extra_fields_values) yield (0, fields_values) + if show_filter_warning: + vollog.warning( + "Could not filter by mount namespace id. This field is not available in this kernel." + ) + def run(self): pids = self.config.get("pids") mount_ns_ids = self.config.get("mntns") @@ -263,9 +262,9 @@ def run(self): # to displays the mountpoints per namespace. 
if pids: columns.append(("PID", int)) - per_namespace = False + filtered_by_pids = True else: - per_namespace = True + filtered_by_pids = False if self.config.get("mount-format"): extra_columns = [ @@ -292,5 +291,6 @@ def run(self): columns.extend(extra_columns) return renderers.TreeGrid( - columns, self._generator(tasks, mount_ns_ids, mount_format, per_namespace) + columns, + self._generator(tasks, mount_ns_ids, mount_format, filtered_by_pids), ) diff --git a/volatility3/framework/plugins/linux/proc.py b/volatility3/framework/plugins/linux/proc.py index 9d8af482e3..e7d38b1072 100644 --- a/volatility3/framework/plugins/linux/proc.py +++ b/volatility3/framework/plugins/linux/proc.py @@ -4,18 +4,25 @@ """A module containing a collection of plugins that produce data typically found in Linux's /proc file system.""" -from volatility3.framework import renderers +import logging +from typing import Callable, Generator, Type, Optional + +from volatility3.framework import renderers, interfaces, exceptions from volatility3.framework.configuration import requirements from volatility3.framework.interfaces import plugins from volatility3.framework.objects import utility from volatility3.framework.renderers import format_hints from volatility3.plugins.linux import pslist +vollog = logging.getLogger(__name__) + class Maps(plugins.PluginInterface): """Lists all memory maps for all processes.""" _required_framework_version = (2, 0, 0) + _version = (1, 0, 0) + MAXSIZE_DEFAULT = 1024 * 1024 * 1024 # 1 Gb @classmethod def get_requirements(cls): @@ -35,16 +42,149 @@ def get_requirements(cls): element_type=int, optional=True, ), + requirements.BooleanRequirement( + name="dump", + description="Extract listed memory segments", + default=False, + optional=True, + ), + requirements.ListRequirement( + name="address", + description="Process virtual memory addresses to include " + "(all other VMA sections are excluded). This can be any " + "virtual address within the VMA section.", + element_type=int, + optional=True, + ), + requirements.IntRequirement( + name="maxsize", + description="Maximum size for dumped VMA sections " + "(all the bigger sections will be ignored)", + default=cls.MAXSIZE_DEFAULT, + optional=True, + ), ] + @classmethod + def list_vmas( + cls, + task: interfaces.objects.ObjectInterface, + filter_func: Callable[ + [interfaces.objects.ObjectInterface], bool + ] = lambda _: True, + ) -> Generator[interfaces.objects.ObjectInterface, None, None]: + """Lists the Virtual Memory Areas of a specific process. + + Args: + task: task object from which to list the vma + filter_func: Function to take a vma and return False if it should be filtered out + + Returns: + Yields vmas based on the task and filtered based on the filter function + """ + if task.mm: + for vma in task.mm.get_vma_iter(): + if filter_func(vma): + yield vma + else: + vollog.debug( + f"Excluded vma at offset {vma.vol.offset:#x} for pid {task.pid} due to filter_func" + ) + else: + vollog.debug( + f"Excluded pid {task.pid} as there is no mm member. It is likely a kernel thread." + ) + + @classmethod + def vma_dump( + cls, + context: interfaces.context.ContextInterface, + task: interfaces.objects.ObjectInterface, + vm_start: int, + vm_end: int, + open_method: Type[interfaces.plugins.FileHandlerInterface], + maxsize: int = MAXSIZE_DEFAULT, + ) -> Optional[interfaces.plugins.FileHandlerInterface]: + """Extracts the complete data for VMA as a FileInterface. 
+ + Args: + context: The context to retrieve required elements (layers, symbol tables) from + task: an task_struct instance + vm_start: The start virtual address from the vma to dump + vm_end: The end virtual address from the vma to dump + open_method: class to provide context manager for opening the file + maxsize: Max size of VMA section (default MAXSIZE_DEFAULT) + + Returns: + An open FileInterface object containing the complete data for the task or None in the case of failure + """ + pid = task.pid + + try: + proc_layer_name = task.add_process_layer() + except exceptions.InvalidAddressException as excp: + vollog.debug( + "Process {}: invalid address {} in layer {}".format( + pid, excp.invalid_address, excp.layer_name + ) + ) + return None + vm_size = vm_end - vm_start + + # check if vm_size is negative, this should never happen. + if vm_size < 0: + vollog.warning( + f"Skip virtual memory dump for pid {pid} between {vm_start:#x}-{vm_end:#x} as {vm_size} is negative." + ) + return None + # check if vm_size is larger than the maxsize limit, and therefore is not saved out. + if maxsize <= vm_size: + vollog.warning( + f"Skip virtual memory dump for pid {pid} between {vm_start:#x}-{vm_end:#x} as {vm_size} is larger than maxsize limit of {maxsize}" + ) + return None + proc_layer = context.layers[proc_layer_name] + file_name = f"pid.{pid}.vma.{vm_start:#x}-{vm_end:#x}.dmp" + try: + file_handle = open_method(file_name) + chunk_size = 1024 * 1024 * 10 + offset = vm_start + while offset < vm_start + vm_size: + to_read = min(chunk_size, vm_start + vm_size - offset) + data = proc_layer.read(offset, to_read, pad=True) + file_handle.write(data) + offset += to_read + except Exception as excp: + vollog.debug(f"Unable to dump virtual memory {file_name}: {excp}") + return None + return file_handle + def _generator(self, tasks): + # build filter for addresses if required + address_list = self.config.get("address", None) + if not address_list: + # do not filter as no address_list was supplied + vma_filter_func = lambda _: True + else: + # filter for any vm_start that matches the supplied address config + def vma_filter_function(x: interfaces.objects.ObjectInterface) -> bool: + addrs_in_vma = [ + addr for addr in address_list if x.vm_start <= addr <= x.vm_end + ] + + # if any of the user supplied addresses would fall within this vma return true + if addrs_in_vma: + return True + else: + return False + + vma_filter_func = vma_filter_function for task in tasks: if not task.mm: continue - name = utility.array_to_string(task.comm) - for vma in task.mm.get_mmap_iter(): + for vma in self.list_vmas(task, filter_func=vma_filter_func): flags = vma.get_protection() page_offset = vma.get_page_offset() major = 0 @@ -58,9 +198,34 @@ def _generator(self, tasks): major = inode_object.i_sb.major minor = inode_object.i_sb.minor inode = inode_object.i_ino - path = vma.get_name(self.context, task) + file_output = "Disabled" + if self.config["dump"]: + file_output = "Error outputting file" + try: + vm_start = vma.vm_start + vm_end = vma.vm_end + except AttributeError: + vollog.debug( + f"Unable to find the vm_start and vm_end for vma at {vma.vol.offset:#x} for pid {task.pid}" + ) + vm_start = None + vm_end = None + if vm_start and vm_end: + # only attempt to dump the memory if we have vm_start and vm_end + file_handle = self.vma_dump( + self.context, + task, + vm_start, + vm_end, + self.open, + self.config["maxsize"], + ) + + if file_handle: + file_handle.close() + file_output = file_handle.preferred_filename yield ( 0, ( @@ 
-74,6 +239,7 @@ def _generator(self, tasks): minor, inode, path, + file_output, ), ) @@ -92,6 +258,7 @@ def run(self): ("Minor", int), ("Inode", int), ("File Path", str), + ("File output", str), ], self._generator( pslist.PsList.list_tasks( diff --git a/volatility3/framework/plugins/linux/psscan.py b/volatility3/framework/plugins/linux/psscan.py index 7b03d8c87d..462577e583 100644 --- a/volatility3/framework/plugins/linux/psscan.py +++ b/volatility3/framework/plugins/linux/psscan.py @@ -52,7 +52,10 @@ def _get_task_fields( """ pid = task.tgid tid = task.pid - ppid = task.parent.tgid if task.parent else 0 + ppid = 0 + + if task.parent.is_readable(): + ppid = task.parent.tgid name = utility.array_to_string(task.comm) exit_state = DescExitStateEnum(task.exit_state).name diff --git a/volatility3/framework/plugins/linux/sockstat.py b/volatility3/framework/plugins/linux/sockstat.py index f03a2ad8eb..fa67122baa 100644 --- a/volatility3/framework/plugins/linux/sockstat.py +++ b/volatility3/framework/plugins/linux/sockstat.py @@ -28,7 +28,11 @@ def __init__(self, vmlinux, task): self._vmlinux = vmlinux self._task = task - netns_id = task.nsproxy.net_ns.get_inode() + try: + netns_id = task.nsproxy.net_ns.get_inode() + except AttributeError: + netns_id = NotAvailableValue() + self._netdevices = self._build_network_devices_map(netns_id) self._sock_family_handlers = { @@ -61,7 +65,10 @@ def _build_network_devices_map(self, netns_id: int) -> Dict: self._vmlinux.symbol_table_name + constants.BANG + "net_device" ) for net_dev in net.dev_base_head.to_list(net_device_symname, "dev_list"): - if net.get_inode() != netns_id: + if ( + isinstance(netns_id, NotAvailableValue) + or net.get_inode() != netns_id + ): continue dev_name = utility.array_to_string(net_dev.name) netdevices_map[net_dev.ifindex] = dev_name @@ -76,7 +83,7 @@ def process_sock( sock: Kernel generic `sock` object Returns a tuple with: - sock: The respective kernel's *_sock object for that socket family + sock: The respective kernel's \*_sock object for that socket family sock_stat: A tuple with the source and destination (address and port) along with its state string socket_filter: A dictionary with information about the socket filter """ @@ -143,19 +150,32 @@ def _extract_socket_filter_info( return bpfprog = sock_filter.prog - if bpfprog.type == 0: - # BPF_PROG_TYPE_UNSPEC = 0 + + BPF_PROG_TYPE_UNSPEC = 0 # cBPF filter + try: + bpfprog_type = bpfprog.get_type() + if bpfprog_type == BPF_PROG_TYPE_UNSPEC: + return # cBPF filter + except AttributeError: + # kernel < 3.18.140, it's a cBPF filter + return + + BPF_PROG_TYPE_SOCKET_FILTER = 1 # eBPF filter + if bpfprog_type != BPF_PROG_TYPE_SOCKET_FILTER: + socket_filter["bpf_filter_type"] = f"UNK({bpfprog_type})" + vollog.warning(f"Unexpected BPF type {bpfprog_type} for a socket") return socket_filter["bpf_filter_type"] = "eBPF" if not bpfprog.has_member("aux") or not bpfprog.aux: - return + return # kernel < 3.18.140 bpfprog_aux = bpfprog.aux + if bpfprog_aux.has_member("id"): - # `id` member was added to `bpf_prog_aux` in kernels 4.13 + # `id` member was added to `bpf_prog_aux` in kernels 4.13.16 socket_filter["bpf_filter_id"] = str(bpfprog_aux.id) if bpfprog_aux.has_member("name"): - # `name` was added to `bpf_prog_aux` in kernels 4.15 + # `name` was added to `bpf_prog_aux` in kernels 4.15.18 bpfprog_name = utility.array_to_string(bpfprog_aux.name) if bpfprog_name: socket_filter["bpf_filter_name"] = bpfprog_name @@ -227,14 +247,22 @@ def _netlink_sock( if netlink_sock.groups: groups_bitmap = 
netlink_sock.groups.dereference() src_addr = f"groups:0x{groups_bitmap:08x}" - src_port = netlink_sock.portid + + try: + # Kernel >= 3.7.10 + src_port = netlink_sock.get_portid() + except AttributeError: + src_port = NotAvailableValue() dst_addr = f"group:0x{netlink_sock.dst_group:08x}" module = netlink_sock.module if module and module.name: module_name_str = utility.array_to_string(module.name) dst_addr = f"{dst_addr},lkm:{module_name_str}" - dst_port = netlink_sock.dst_portid + try: + dst_port = netlink_sock.get_dst_portid() + except AttributeError: + dst_port = NotAvailableValue() state = netlink_sock.get_state() @@ -473,8 +501,7 @@ def list_sockets( family: Socket family string (AF_UNIX, AF_INET, etc) sock_type: Socket type string (STREAM, DGRAM, etc) protocol: Protocol string (UDP, TCP, etc) - sock_fields: A tuple with the *_sock object, the sock stats and the - extended info dictionary + sock_fields: A tuple with the \*_sock object, the sock stats and the extended info dictionary """ vmlinux = context.modules[symbol_table] @@ -518,7 +545,11 @@ def list_sockets( protocol = child_sock.get_protocol() net = task.nsproxy.net_ns - netns_id = net.get_inode() + try: + netns_id = net.get_inode() + except AttributeError: + netns_id = NotAvailableValue() + yield task, netns_id, fd_num, family, sock_type, protocol, sock_fields def _format_fields(self, sock_stat, protocol): diff --git a/volatility3/framework/plugins/mac/pslist.py b/volatility3/framework/plugins/mac/pslist.py index 1d97216bfb..88045a277e 100644 --- a/volatility3/framework/plugins/mac/pslist.py +++ b/volatility3/framework/plugins/mac/pslist.py @@ -2,12 +2,14 @@ # which is available at https://www.volatilityfoundation.org/license/vsl-v1.0 # +import datetime import logging -from typing import Callable, Iterable, List, Dict +from typing import Callable, Dict, Iterable, List -from volatility3.framework import renderers, interfaces, exceptions +from volatility3.framework import exceptions, interfaces, renderers from volatility3.framework.configuration import requirements from volatility3.framework.objects import utility +from volatility3.framework.renderers import format_hints from volatility3.framework.symbols import mac vollog = logging.getLogger(__name__) @@ -105,10 +107,20 @@ def _generator(self): self.config["kernel"], filter_func=self.create_pid_filter(self.config.get("pid", None)), ): + offset = format_hints.Hex(task.vol.offset) + name = utility.array_to_string(task.p_comm) pid = task.p_pid + uid = task.p_uid + gid = task.p_gid + start_time_seconds = task.p_start.tv_sec + start_time_microseconds = task.p_start.tv_usec + start_time = datetime.datetime.fromtimestamp( + start_time_seconds + start_time_microseconds / 1e6 + ) + ppid = task.p_ppid - name = utility.array_to_string(task.p_comm) - yield (0, (pid, ppid, name)) + + yield (0, (offset, name, pid, uid, gid, start_time, ppid)) @classmethod def list_tasks_allproc( @@ -310,5 +322,14 @@ def list_tasks_pid_hash_table( def run(self): return renderers.TreeGrid( - [("PID", int), ("PPID", int), ("COMM", str)], self._generator() + [ + ("OFFSET", format_hints.Hex), + ("NAME", str), + ("PID", int), + ("UID", int), + ("GID", int), + ("Start Time", datetime.datetime), + ("PPID", int), + ], + self._generator(), ) diff --git a/volatility3/framework/plugins/windows/crashinfo.py b/volatility3/framework/plugins/windows/crashinfo.py index a9f32f63d6..4ecd850872 100644 --- a/volatility3/framework/plugins/windows/crashinfo.py +++ b/volatility3/framework/plugins/windows/crashinfo.py @@ -14,6 +14,8 
@@ class Crashinfo(interfaces.plugins.PluginInterface): + """Lists the information from a Windows crash dump.""" + _required_framework_version = (2, 0, 0) @classmethod diff --git a/volatility3/framework/plugins/windows/ldrmodules.py b/volatility3/framework/plugins/windows/ldrmodules.py index 9642810a58..4c8456fa96 100644 --- a/volatility3/framework/plugins/windows/ldrmodules.py +++ b/volatility3/framework/plugins/windows/ldrmodules.py @@ -7,6 +7,8 @@ class LdrModules(interfaces.plugins.PluginInterface): + """Lists the loaded modules in a particular windows memory image.""" + _required_framework_version = (2, 0, 0) _version = (1, 0, 0) diff --git a/volatility3/framework/plugins/windows/modules.py b/volatility3/framework/plugins/windows/modules.py index ff61c215cd..7dabf69545 100644 --- a/volatility3/framework/plugins/windows/modules.py +++ b/volatility3/framework/plugins/windows/modules.py @@ -42,6 +42,12 @@ def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface] default=False, optional=True, ), + requirements.StringRequirement( + name="name", + description="module name/sub string", + optional=True, + default=None, + ), ] def _generator(self): @@ -63,6 +69,9 @@ def _generator(self): except exceptions.InvalidAddressException: FullDllName = "" + if self.config["name"] and self.config["name"] not in BaseDllName: + continue + file_output = "Disabled" if self.config["dump"]: file_handle = dlllist.DllList.dump_pe( diff --git a/volatility3/framework/plugins/windows/strings.py b/volatility3/framework/plugins/windows/strings.py index 32f3df4c84..0eaa65884b 100644 --- a/volatility3/framework/plugins/windows/strings.py +++ b/volatility3/framework/plugins/windows/strings.py @@ -149,9 +149,9 @@ def generate_mapping( for mapval in layer.mapping(0x0, layer.maximum_address, ignore_errors=True): offset, _, mapped_offset, mapped_size, maplayer = mapval for val in range(mapped_offset, mapped_offset + mapped_size, 0x1000): - cur_set = reverse_map.get(mapped_offset >> 12, set()) + cur_set = reverse_map.get(val >> 12, set()) cur_set.add(("kernel", offset)) - reverse_map[mapped_offset >> 12] = cur_set + reverse_map[val >> 12] = cur_set if progress_callback: progress_callback( (offset * 100) / layer.maximum_address, diff --git a/volatility3/framework/plugins/windows/vadinfo.py b/volatility3/framework/plugins/windows/vadinfo.py index 812affe865..abc6142fe0 100644 --- a/volatility3/framework/plugins/windows/vadinfo.py +++ b/volatility3/framework/plugins/windows/vadinfo.py @@ -198,6 +198,7 @@ def vad_dump( def _generator(self, procs): kernel = self.context.modules[self.config["kernel"]] + kernel_layer = self.context.layers[kernel.layer_name] def passthrough(_: interfaces.objects.ObjectInterface) -> bool: return False @@ -229,7 +230,7 @@ def filter_function(x: interfaces.objects.ObjectInterface) -> bool: ( proc.UniqueProcessId, process_name, - format_hints.Hex(vad.vol.offset), + format_hints.Hex(kernel_layer.canonicalize(vad.vol.offset)), format_hints.Hex(vad.get_start()), format_hints.Hex(vad.get_end()), vad.get_tag(), diff --git a/volatility3/framework/plugins/yarascan.py b/volatility3/framework/plugins/yarascan.py index 1c548e5a62..1c84676895 100644 --- a/volatility3/framework/plugins/yarascan.py +++ b/volatility3/framework/plugins/yarascan.py @@ -34,13 +34,27 @@ def __init__(self, rules) -> None: if rules is None: raise ValueError("No rules provided to YaraScanner") self._rules = rules + self.st_object = not tuple([int(x) for x in yara.__version__.split(".")]) < ( + 4, + 3, + ) def 
__call__( self, data: bytes, data_offset: int ) -> Iterable[Tuple[int, str, str, bytes]]: for match in self._rules.match(data=data): - for offset, name, value in match.strings: - yield (offset + data_offset, match.rule, name, value) + if self.st_object: + for match_string in match.strings: + for instance in match_string.instances: + yield ( + instance.offset + data_offset, + match.rule, + match_string.identifier, + instance.matched_data, + ) + else: + for offset, name, value in match.strings: + yield (offset + data_offset, match.rule, name, value) class YaraScan(plugins.PluginInterface): diff --git a/volatility3/framework/symbols/linux/__init__.py b/volatility3/framework/symbols/linux/__init__.py index ce07167e52..5c42a436d6 100644 --- a/volatility3/framework/symbols/linux/__init__.py +++ b/volatility3/framework/symbols/linux/__init__.py @@ -1,7 +1,7 @@ # This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0 # which is available at https://www.volatilityfoundation.org/license/vsl-v1.0 # -from typing import Iterator, List, Tuple, Optional +from typing import Iterator, List, Tuple, Optional, Union from volatility3 import framework from volatility3.framework import constants, exceptions, interfaces, objects @@ -28,8 +28,11 @@ def __init__(self, *args, **kwargs) -> None: self.set_type_class("fs_struct", extensions.fs_struct) self.set_type_class("files_struct", extensions.files_struct) self.set_type_class("kobject", extensions.kobject) + self.set_type_class("cred", extensions.cred) + self.set_type_class("kernel_cap_struct", extensions.kernel_cap_struct) # Might not exist in the current symbols self.optional_set_type_class("module", extensions.module) + self.optional_set_type_class("bpf_prog", extensions.bpf_prog) # Mount self.set_type_class("vfsmount", extensions.vfsmount) @@ -50,92 +53,113 @@ def __init__(self, *args, **kwargs) -> None: self.optional_set_type_class("bt_sock", extensions.bt_sock) self.optional_set_type_class("xdp_sock", extensions.xdp_sock) + # Only found in 6.1+ kernels + self.optional_set_type_class("maple_tree", extensions.maple_tree) + class LinuxUtilities(interfaces.configuration.VersionableInterface): """Class with multiple useful linux functions.""" - _version = (2, 0, 0) + _version = (2, 1, 0) _required_framework_version = (2, 0, 0) framework.require_interface_version(*_required_framework_version) - # based on __d_path from the Linux kernel @classmethod - def _do_get_path(cls, rdentry, rmnt, dentry, vfsmnt) -> str: - ret_path: List[str] = [] + def _get_path_file(cls, task, filp) -> str: + """Returns the file pathname relative to the task's root directory. - while dentry != rdentry or vfsmnt != rmnt: - dname = dentry.path() - if dname == "": - break + Args: + task (task_struct): A reference task + filp (file *): A pointer to an open file - ret_path.insert(0, dname.strip("/")) - if dentry == vfsmnt.get_mnt_root() or dentry == dentry.d_parent: - if vfsmnt.get_mnt_parent() == vfsmnt: - break + Returns: + str: File pathname relative to the task's root directory. + """ + rdentry = task.fs.get_root_dentry() + rmnt = task.fs.get_root_mnt() + vfsmnt = filp.get_vfsmnt() + dentry = filp.get_dentry() - dentry = vfsmnt.get_mnt_mountpoint() - vfsmnt = vfsmnt.get_mnt_parent() + return cls.do_get_path(rdentry, rmnt, dentry, vfsmnt) - continue + @classmethod + def get_path_mnt(cls, task, mnt) -> str: + """Returns the mount point pathname relative to the task's root directory. 
- parent = dentry.d_parent - dentry = parent + Args: + task (task_struct): A reference task + mnt (vfsmount or mount): A mounted filesystem or a mount point. + - kernels < 3.3.8 type is 'vfsmount' + - kernels >= 3.3.8 type is 'mount' - # if we did not gather any valid dentrys in the path, then the entire file is - # either 1) smeared out of memory or 2) de-allocated and corresponding structures overwritten - # we return an empty string in this case to avoid confusion with something like a handle to the root - # directory (e.g., "/") - if not ret_path: - return "" + Returns: + str: Pathname of the mount point relative to the task's root directory. + """ + rdentry = task.fs.get_root_dentry() + rmnt = task.fs.get_root_mnt() - ret_val = "/".join([str(p) for p in ret_path if p != ""]) + vfsmnt = mnt.get_vfsmnt_current() + dentry = mnt.get_dentry_current() - if ret_val.startswith(("socket:", "pipe:")): - if ret_val.find("]") == -1: - try: - inode = dentry.d_inode - ino = inode.i_ino - except exceptions.InvalidAddressException: - ino = 0 + return cls.do_get_path(rdentry, rmnt, dentry, vfsmnt) - ret_val = ret_val[:-1] + f":[{ino}]" - else: - ret_val = ret_val.replace("/", "") + @classmethod + def do_get_path(cls, rdentry, rmnt, dentry, vfsmnt) -> Union[None, str]: + """Returns a pathname of the mount point or file + It mimics the Linux kernel prepend_path function. - elif ret_val != "inotify": - ret_val = "/" + ret_val + Args: + rdentry (dentry *): A pointer to the root dentry + rmnt (vfsmount *): A pointer to the root vfsmount + dentry (dentry *): A pointer to the dentry + vfsmnt (vfsmount *): A pointer to the vfsmount - return ret_val + Returns: + str: Pathname of the mount point or file + """ - # method used by 'older' kernels - # TODO: lookup when dentry_operations->d_name was merged into the mainline kernel for exact version - @classmethod - def _get_path_file(cls, task, filp) -> str: - rdentry = task.fs.get_root_dentry() - rmnt = task.fs.get_root_mnt() - dentry = filp.get_dentry() - vfsmnt = filp.get_vfsmnt() + path_reversed = [] + while dentry != rdentry or not vfsmnt.is_equal(rmnt): + if dentry == vfsmnt.get_mnt_root() or dentry.is_root(): + # Escaped? + if dentry != vfsmnt.get_mnt_root(): + break + + # Global root? + if not vfsmnt.has_parent(): + break - return LinuxUtilities._do_get_path(rdentry, rmnt, dentry, vfsmnt) + dentry = vfsmnt.get_dentry_parent() + vfsmnt = vfsmnt.get_vfsmnt_parent() + + continue + + parent = dentry.d_parent + dname = dentry.d_name.name_as_str() + path_reversed.append(dname.strip("/")) + dentry = parent + + path = "/" + "/".join(reversed(path_reversed)) + return path @classmethod def _get_new_sock_pipe_path(cls, context, task, filp) -> str: - dentry = filp.get_dentry() + """Returns the sock pipe pathname relative to the task's root directory. - sym_addr = dentry.d_op.d_dname + Args: + context: The context to retrieve required elements (layers, symbol tables) from + task (task_struct): A reference task + filp (file *): A pointer to a sock pipe open file - symbol_table_arr = sym_addr.vol.type_name.split("!") - symbol_table = None - if len(symbol_table_arr) == 2: - symbol_table = symbol_table_arr[0] + Returns: + str: Sock pipe pathname relative to the task's root directory. 
+ """ + dentry = filp.get_dentry() - for module_name in context.modules.get_modules_by_symbol_tables(symbol_table): - kernel_module = context.modules[module_name] - break - else: - raise ValueError(f"No module using the symbol table {symbol_table}") + kernel_module = cls.get_module_from_volobj_type(context, dentry) + sym_addr = dentry.d_op.d_dname symbs = list(kernel_module.get_symbols_by_absolute_location(sym_addr)) if len(symbs) == 1: @@ -163,12 +187,26 @@ def _get_new_sock_pipe_path(cls, context, task, filp) -> str: return ret - # a 'file' structure doesn't have enough information to properly restore its full path - # we need the root mount information from task_struct to determine this @classmethod def path_for_file(cls, context, task, filp) -> str: + """Returns a file (or sock pipe) pathname relative to the task's root directory. + + A 'file' structure doesn't have enough information to properly restore its + full path we need the root mount information from task_struct to determine this + + Args: + context: The context to retrieve required elements (layers, symbol tables) from + task (task_struct): A reference task + filp (file *): A pointer to an open file + + Returns: + str: A file (or sock pipe) pathname relative to the task's root directory. + """ + + # Memory smear protection: Check that both the file and dentry pointers are valid. try: dentry = filp.get_dentry() + dentry.is_root() except exceptions.InvalidAddressException: return "" @@ -348,3 +386,35 @@ def container_of( return vmlinux.object( object_type=type_name, offset=container_addr, absolute=True ) + + @classmethod + def get_module_from_volobj_type( + cls, + context: interfaces.context.ContextInterface, + volobj: interfaces.objects.ObjectInterface, + ) -> interfaces.context.ModuleInterface: + """Get the vmlinux from a vol obj + + Args: + context: The context to retrieve required elements (layers, symbol tables) from + volobj (vol object): A vol object + + Raises: + ValueError: If it cannot obtain any module from the symbol table + + Returns: + A kernel object (vmlinux) + """ + symbol_table_arr = volobj.vol.type_name.split("!", 1) + symbol_table = symbol_table_arr[0] if len(symbol_table_arr) == 2 else None + + module_names = context.modules.get_modules_by_symbol_tables(symbol_table) + module_names = list(module_names) + + if not module_names: + raise ValueError(f"No module using the symbol table '{symbol_table}'") + + kernel_module_name = module_names[0] + kernel = context.modules[kernel_module_name] + + return kernel diff --git a/volatility3/framework/symbols/linux/extensions/__init__.py b/volatility3/framework/symbols/linux/extensions/__init__.py index 5ab8f1aa0b..616e54e703 100644 --- a/volatility3/framework/symbols/linux/extensions/__init__.py +++ b/volatility3/framework/symbols/linux/extensions/__init__.py @@ -5,7 +5,7 @@ import collections.abc import logging import socket as socket_module -from typing import Generator, Iterable, Iterator, Optional, Tuple +from typing import Generator, Iterable, Iterator, Optional, Tuple, List from volatility3.framework import constants from volatility3.framework.constants.linux import SOCK_TYPES, SOCK_FAMILY @@ -13,6 +13,7 @@ from volatility3.framework.constants.linux import TCP_STATES, NETLINK_PROTOCOLS from volatility3.framework.constants.linux import ETH_PROTOCOLS, BLUETOOTH_STATES from volatility3.framework.constants.linux import BLUETOOTH_PROTOCOLS, SOCKET_STATES +from volatility3.framework.constants.linux import CAPABILITIES from volatility3.framework import exceptions, 
objects, interfaces, symbols from volatility3.framework.layers import linear from volatility3.framework.objects import utility @@ -34,10 +35,8 @@ def get_module_base(self): def get_init_size(self): if self.has_member("init_layout"): return self.init_layout.size - elif self.has_member("init_size"): return self.init_size - raise AttributeError( "module -> get_init_size: Unable to determine .init section size of module" ) @@ -45,10 +44,8 @@ def get_init_size(self): def get_core_size(self): if self.has_member("core_layout"): return self.core_layout.size - elif self.has_member("core_size"): return self.core_size - raise AttributeError( "module -> get_core_size: Unable to determine core size of module" ) @@ -58,7 +55,6 @@ def get_module_core(self): return self.core_layout.base elif self.has_member("module_core"): return self.module_core - raise AttributeError("module -> get_module_core: Unable to get module core") def get_module_init(self): @@ -66,7 +62,6 @@ def get_module_init(self): return self.init_layout.base elif self.has_member("module_init"): return self.module_init - raise AttributeError("module -> get_module_core: Unable to get module init") def get_name(self): @@ -88,7 +83,6 @@ def _get_sect_count(self, grp): idx = 0 while arr[idx]: idx = idx + 1 - return idx def get_sections(self): @@ -97,7 +91,6 @@ def get_sections(self): num_sects = self.sect_attrs.nsections else: num_sects = self._get_sect_count(self.sect_attrs.grp) - arr = self._context.object( self.get_symbol_table().name + constants.BANG + "array", layer_name=self.vol.layer_name, @@ -116,7 +109,6 @@ def get_symbols(self): prefix = "Elf64_" else: prefix = "Elf32_" - elf_table_name = intermed.IntermediateSymbolTable.create( self.context, self.config_path, @@ -155,7 +147,6 @@ def section_symtab(self): return self.kallsyms.symtab elif self.has_member("symtab"): return self.symtab - raise AttributeError("module -> symtab: Unable to get symtab") @property @@ -164,7 +155,6 @@ def num_symtab(self): return int(self.kallsyms.num_symtab) elif self.has_member("num_symtab"): return int(self.num_symtab) - raise AttributeError( "module -> num_symtab: Unable to determine number of symbols" ) @@ -177,7 +167,6 @@ def section_strtab(self): # Older kernels elif self.has_member("strtab"): return self.strtab - raise AttributeError("module -> strtab: Unable to get strtab") @@ -195,19 +184,15 @@ def add_process_layer( pgd = self.mm.pgd except exceptions.InvalidAddressException: return None - if not isinstance(parent_layer, linear.LinearlyMappedLayer): raise TypeError( "Parent layer is not a translation layer, unable to construct process layer" ) - dtb, layer_name = parent_layer.translate(pgd) if not dtb: return None - if preferred_name is None: preferred_name = self.vol.layer_name + f"_Process{self.pid}" - # Add the constructed layer and return the name return self._add_process_layer( self._context, dtb, config_prefix, preferred_name @@ -229,7 +214,6 @@ def get_process_memory_sections( vollog.info( f"adding vma: {start:x} {self.mm.brk:x} | {end:x} {self.mm.start_brk:x}" ) - yield (start, end - start) @property @@ -282,7 +266,6 @@ def get_root_dentry(self): return self.root elif self.root.has_member("dentry"): return self.root.dentry - raise AttributeError("Unable to find the root dentry") def get_root_mnt(self): @@ -291,17 +274,135 @@ def get_root_mnt(self): return self.rootmnt elif self.root.has_member("mnt"): return self.root.mnt - raise AttributeError("Unable to find the root mount") +class maple_tree(objects.StructType): + # 
include/linux/maple_tree.h + # Mask for Maple Tree Flags + MT_FLAGS_HEIGHT_MASK = 0x7C + MT_FLAGS_HEIGHT_OFFSET = 0x02 + + # Shift and mask to extract information from maple tree node pointers + MAPLE_NODE_TYPE_SHIFT = 0x03 + MAPLE_NODE_TYPE_MASK = 0x0F + MAPLE_NODE_POINTER_MASK = 0xFF + + # types of Maple Tree Nodes + MAPLE_DENSE = 0 + MAPLE_LEAF_64 = 1 + MAPLE_RANGE_64 = 2 + MAPLE_ARANGE_64 = 3 + + def get_slot_iter(self): + """Parse the Maple Tree and return every non zero slot.""" + maple_tree_offset = self.vol.offset & ~(self.MAPLE_NODE_POINTER_MASK) + expected_maple_tree_depth = ( + self.ma_flags & self.MT_FLAGS_HEIGHT_MASK + ) >> self.MT_FLAGS_HEIGHT_OFFSET + yield from self._parse_maple_tree_node( + self.ma_root, maple_tree_offset, expected_maple_tree_depth + ) + + def _parse_maple_tree_node( + self, + maple_tree_entry, + parent, + expected_maple_tree_depth, + seen=set(), + current_depth=1, + ): + """Recursively parse Maple Tree Nodes and yield all non empty slots""" + + # protect against unlikely loop + if maple_tree_entry in seen: + vollog.warning( + f"The mte {hex(maple_tree_entry)} has all ready been seen, no further results will be produced for this node." + ) + return + else: + seen.add(maple_tree_entry) + # check if we have exceeded the expected depth of this maple tree. + # e.g. when current_depth is larger than expected_maple_tree_depth there may be an issue. + # it is normal that expected_maple_tree_depth is equal to current_depth. + if expected_maple_tree_depth < current_depth: + vollog.warning( + f"The depth for the maple tree at {hex(self.vol.offset)} is {expected_maple_tree_depth}, however when parsing the nodes " + f"a depth of {current_depth} was reached. This is unexpected and may lead to incorrect results." + ) + # parse the mte to extract the pointer value, node type, and leaf status + pointer = maple_tree_entry & ~(self.MAPLE_NODE_POINTER_MASK) + node_type = ( + maple_tree_entry >> self.MAPLE_NODE_TYPE_SHIFT + ) & self.MAPLE_NODE_TYPE_MASK + + # create a pointer object for the node parent mte (note this will include flags in the low bits) + symbol_table_name = self.get_symbol_table_name() + node_parent_mte = self._context.object( + symbol_table_name + constants.BANG + "pointer", + layer_name=self.vol.native_layer_name, + offset=pointer, + ) + + # extract the actual pointer to the parent of this node + node_parent_pointer = node_parent_mte & ~(self.MAPLE_NODE_POINTER_MASK) + + # verify that the node_parent_pointer correctly points to the parent + assert node_parent_pointer == parent + + # create a node object + node = self._context.object( + symbol_table_name + constants.BANG + "maple_node", + layer_name=self.vol.layer_name, + offset=pointer, + ) + + # parse the slots based on the node type + if node_type == self.MAPLE_DENSE: + for slot in node.alloc.slot: + if (slot & ~(self.MAPLE_NODE_TYPE_MASK)) != 0: + yield slot + elif node_type == self.MAPLE_LEAF_64: + for slot in node.mr64.slot: + if (slot & ~(self.MAPLE_NODE_TYPE_MASK)) != 0: + yield slot + elif node_type == self.MAPLE_RANGE_64: + for slot in node.mr64.slot: + if (slot & ~(self.MAPLE_NODE_TYPE_MASK)) != 0: + yield from self._parse_maple_tree_node( + slot, + pointer, + expected_maple_tree_depth, + seen, + current_depth + 1, + ) + elif node_type == self.MAPLE_ARANGE_64: + for slot in node.ma64.slot: + if (slot & ~(self.MAPLE_NODE_TYPE_MASK)) != 0: + yield from self._parse_maple_tree_node( + slot, + pointer, + expected_maple_tree_depth, + seen, + current_depth + 1, + ) + else: + # unkown maple node type + 
raise AttributeError(
+                f"Unknown Maple Tree node type {node_type} at offset {hex(pointer)}."
+            )
+
+
 class mm_struct(objects.StructType):
     def get_mmap_iter(self) -> Iterable[interfaces.objects.ObjectInterface]:
         """Returns an iterator for the mmap list member of an mm_struct."""
+        if not self.has_member("mmap"):
+            raise AttributeError(
+                "get_mmap_iter called on mm_struct where no mmap member exists."
+            )
         if not self.mmap:
             return
-
         yield self.mmap
 
         seen = {self.mmap.vol.offset}
@@ -312,6 +413,33 @@ def get_mmap_iter(self) -> Iterable[interfaces.objects.ObjectInterface]:
             seen.add(link.vol.offset)
             link = link.vm_next
 
+    def get_maple_tree_iter(self) -> Iterable[interfaces.objects.ObjectInterface]:
+        """Returns an iterator for the mm_mt member of an mm_struct."""
+
+        if not self.has_member("mm_mt"):
+            raise AttributeError(
+                "get_maple_tree_iter called on mm_struct where no mm_mt member exists."
+            )
+        symbol_table_name = self.get_symbol_table_name()
+        for vma_pointer in self.mm_mt.get_slot_iter():
+            # convert pointer to vm_area_struct and yield
+            vma = self._context.object(
+                symbol_table_name + constants.BANG + "vm_area_struct",
+                layer_name=self.vol.native_layer_name,
+                offset=vma_pointer,
+            )
+            yield vma
+
+    def get_vma_iter(self) -> Iterable[interfaces.objects.ObjectInterface]:
+        """Returns an iterator for the VMAs in an mm_struct, automatically choosing mmap or mm_mt as required."""
+
+        if self.has_member("mmap"):
+            yield from self.get_mmap_iter()
+        elif self.has_member("mm_mt"):
+            yield from self.get_maple_tree_iter()
+        else:
+            raise AttributeError("Unable to find mmap or mm_mt in mm_struct")
+
 
 class super_block(objects.StructType):
     # include/linux/kdev_t.h
@@ -418,7 +546,6 @@ def _parse_flags(self, vm_flags, parse_flags) -> str:
                 retval = retval + char
             else:
                 retval = retval + "-"
-
         return retval
 
     # only parse the rwx bits
@@ -432,7 +559,6 @@ def get_flags(self) -> str:
     def get_page_offset(self) -> int:
         if self.vm_file == 0:
            return 0
-
        return self.vm_pgoff << constants.linux.PAGE_SHIFT
 
     def get_name(self, context, task):
@@ -449,21 +575,36 @@ def get_name(self, context, task):
             fname = "[vdso]"
         else:
             fname = "Anonymous Mapping"
-
         return fname
 
     # used by malfind
-    def is_suspicious(self):
+    def is_suspicious(self, proclayer=None):
         ret = False
 
         flags_str = self.get_protection()
 
         if flags_str == "rwx":
             ret = True
-
         elif flags_str == "r-x" and self.vm_file.dereference().vol.offset == 0:
             ret = True
-
+        elif proclayer and "x" in flags_str:
+            for i in range(self.vm_start, self.vm_end, 1 << constants.linux.PAGE_SHIFT):
+                try:
+                    if proclayer.is_dirty(i):
+                        vollog.warning(
+                            f"Found malicious (dirty+exec) page at {hex(i)}!"
+ ) + # We do not attempt to find other dirty+exec pages once we have found one + ret = True + break + except ( + exceptions.PagedInvalidAddressException, + exceptions.InvalidAddressException, + ) as excp: + vollog.debug(f"Unable to translate address {hex(i)} : {excp}") + # Abort as it is likely that other addresses in the same range will also fail + ret = False + break return ret @@ -473,12 +614,10 @@ def name_as_str(self) -> str: str_length = self.len + 1 # Maximum length should include null terminator else: str_length = 255 - try: ret = objects.utility.pointer_to_string(self.name, str_length) except (exceptions.InvalidAddressException, ValueError): ret = "" - return ret @@ -509,7 +648,6 @@ def is_subdir(self, old_dentry): """ if self.vol.offset == old_dentry: return True - return self.d_ancestor(old_dentry) def d_ancestor(self, ancestor_dentry): @@ -527,10 +665,8 @@ def d_ancestor(self, ancestor_dentry): ): if current_dentry.d_parent == ancestor_dentry.vol.offset: return current_dentry - dentry_seen.add(current_dentry.vol.offset) current_dentry = current_dentry.d_parent - return None @@ -544,6 +680,7 @@ def get_dentry(self) -> interfaces.objects.ObjectInterface: raise AttributeError("Unable to find file -> dentry") def get_vfsmnt(self) -> interfaces.objects.ObjectInterface: + """Returns the fs (vfsmount) where this file is mounted""" if self.has_member("f_vfsmnt"): return self.f_vfsmnt elif self.has_member("f_path"): @@ -587,12 +724,10 @@ def to_list( link = getattr(self, direction).dereference() except exceptions.InvalidAddressException: return - if not sentinel: yield self._context.object( symbol_type, layer, offset=self.vol.offset - relative_offset ) - seen = {self.vol.offset} while link.vol.offset not in seen: obj = self._context.object( @@ -675,11 +810,70 @@ def get_mnt_flags(self): raise AttributeError("Unable to find mount -> mount flags") def get_mnt_parent(self): + """Gets the fs where we are mounted on + + Returns: + A mount pointer + """ return self.mnt_parent def get_mnt_mountpoint(self): + """Gets the dentry of the mountpoint + + Returns: + A dentry pointer + """ + return self.mnt_mountpoint + def get_parent_mount(self): + return self.mnt.get_parent_mount() + + def has_parent(self) -> bool: + """Checks if this mount has a parent + + Returns: + bool: 'True' if this mount has a parent + """ + return self.mnt_parent != self.vol.offset + + def get_vfsmnt_current(self): + """Returns the fs where we are mounted on + + Returns: + A 'vfsmount' + """ + return self.mnt + + def get_vfsmnt_parent(self): + """Gets the parent fs (vfsmount) to where it's mounted on + + Returns: + A 'vfsmount' + """ + + return self.get_mnt_parent().get_vfsmnt_current() + + def get_dentry_current(self): + """Returns the root of the mounted tree + + Returns: + A dentry pointer + """ + vfsmnt = self.get_vfsmnt_current() + dentry = vfsmnt.mnt_root + + return dentry + + def get_dentry_parent(self): + """Returns the parent root of the mounted tree + + Returns: + A dentry pointer + """ + + return self.get_mnt_parent().get_dentry_current() + def get_flags_access(self) -> str: return "ro" if self.get_mnt_flags() & self.MNT_READONLY else "rw" @@ -703,9 +897,6 @@ def is_slave(self) -> bool: def get_devname(self) -> str: return utility.pointer_to_string(self.mnt_devname, count=255) - def has_parent(self) -> bool: - return self.vol.offset != self.mnt_parent - def get_dominating_id(self, root) -> int: """Get ID of closest dominating peer group having a representative under the given root.""" mnt_seen = set() @@ -718,7 
+909,6 @@ def get_dominating_id(self, root) -> int:
             peer = current_mnt.get_peer_under_root(self.mnt_ns, root)
             if peer and peer.vol.offset != 0:
                 return peer.mnt_group_id
-
             mnt_seen.add(current_mnt.vol.offset)
             current_mnt = current_mnt.mnt_master
         return 0
@@ -734,12 +924,10 @@ def get_peer_under_root(self, ns, root):
                 current_mnt.mnt.mnt_root, root
             ):
                 return current_mnt
-
             mnt_seen.add(current_mnt.vol.offset)
             current_mnt = current_mnt.next_peer()
             if current_mnt.vol.offset == self.vol.offset:
                 break
-
         return None
 
     def is_path_reachable(self, current_dentry, root):
@@ -756,7 +944,6 @@ def is_path_reachable(self, current_dentry, root):
             current_dentry = current_mnt.mnt_mountpoint
             mnt_seen.add(current_mnt.vol.offset)
             current_mnt = current_mnt.mnt_parent
-
         return current_mnt.mnt.vol.offset == root.mnt and current_dentry.is_subdir(
             root.dentry
         )
@@ -783,24 +970,121 @@ def is_valid(self):
             and self.get_mnt_parent() != 0
         )
 
+    def _is_kernel_prior_to_struct_mount(self) -> bool:
+        """Helper to distinguish between kernels prior to version 3.3.8 that
+        lacked the 'mount' structure and later versions that have it.
+
+        The 'mnt_parent' member was moved from struct 'vfsmount' to struct
+        'mount' when the latter was introduced.
+
+        Alternatively, vmlinux.has_type('mount') could be used here, but checking the member directly is faster.
+
+        Returns:
+            bool: 'True' if the kernel predates the introduction of 'struct mount' (< 3.3.8)
+        """
+
+        return self.has_member("mnt_parent")
+
+    def is_equal(self, vfsmount_ptr) -> bool:
+        """Helper to make sure we are comparing two pointers to 'vfsmount'.
+
+        Depending on the kernel version, the calling object (self) could be
+        a 'vfsmount \*' (<3.3.8) or a 'vfsmount' (>=3.3.8). This way we trust
+        in the framework's "auto" dereferencing ability to ensure that when we
+        reach this point 'self' will be a 'vfsmount' already and self.vol.offset
+        a 'vfsmount \*' and not a 'vfsmount \*\*'. The argument must be a 'vfsmount \*'.
+        Typically, it's called from do_get_path().
+
+        Args:
+            vfsmount_ptr (vfsmount \*): A pointer to a 'vfsmount'
+
+        Raises:
+            exceptions.VolatilityException: If vfsmount_ptr is not a 'vfsmount \*'
+
+        Returns:
+            bool: 'True' if the given argument points to the same 'vfsmount'
+            as 'self'.
+        """
+        if type(vfsmount_ptr) == objects.Pointer:
+            return self.vol.offset == vfsmount_ptr
+        else:
+            raise exceptions.VolatilityException(
+                "Unexpected argument type. It has to be a 'vfsmount *'"
+            )
+
     def _get_real_mnt(self):
-        table_name = self.vol.type_name.split(constants.BANG)[0]
-        mount_struct = f"{table_name}{constants.BANG}mount"
-        offset = self._context.symbol_space.get_type(
-            mount_struct
-        ).relative_child_offset("mnt")
+        """Gets the struct 'mount' containing this 'vfsmount'.
 
-        return self._context.object(
-            mount_struct, self.vol.layer_name, offset=self.vol.offset - offset
+        It should only be called on kernels >= 3.3.8, where 'struct mount' was introduced.
+
+        Returns:
+            mount: the struct 'mount' containing this 'vfsmount'.
+ """ + vmlinux = linux.LinuxUtilities.get_module_from_volobj_type(self._context, self) + return linux.LinuxUtilities.container_of( + self.vol.offset, "mount", "mnt", vmlinux ) + def get_vfsmnt_current(self): + """Returns the current fs where we are mounted on + + Returns: + A vfsmount pointer + """ + return self.get_mnt_parent() + + def get_vfsmnt_parent(self): + """Gets the parent fs (vfsmount) to where it's mounted on + + Returns: + For kernels < 3.3.8: A vfsmount pointer + For kernels >= 3.3.8: A vfsmount object + """ + if self._is_kernel_prior_to_struct_mount(): + return self.get_mnt_parent() + else: + return self._get_real_mnt().get_vfsmnt_parent() + + def get_dentry_current(self): + """Returns the root of the mounted tree + + Returns: + A dentry pointer + """ + if self._is_kernel_prior_to_struct_mount(): + return self.get_mnt_mountpoint() + else: + return self._get_real_mnt().get_dentry_current() + + def get_dentry_parent(self): + """Returns the parent root of the mounted tree + + Returns: + A dentry pointer + """ + if self._is_kernel_prior_to_struct_mount(): + return self.get_mnt_mountpoint() + else: + return self._get_real_mnt().get_mnt_mountpoint() + def get_mnt_parent(self): - if self.has_member("mnt_parent"): + """Gets the mnt_parent member. + + Returns: + For kernels < 3.3.8: A vfsmount pointer + For kernels >= 3.3.8: A mount pointer + """ + if self._is_kernel_prior_to_struct_mount(): return self.mnt_parent else: - return self._get_real_mnt().mnt_parent + return self._get_real_mnt().get_mnt_parent() def get_mnt_mountpoint(self): + """Gets the dentry of the mountpoint + + Returns: + A dentry pointer + """ if self.has_member("mnt_mountpoint"): return self.mnt_mountpoint else: @@ -809,6 +1093,41 @@ def get_mnt_mountpoint(self): def get_mnt_root(self): return self.mnt_root + def has_parent(self) -> bool: + if self._is_kernel_prior_to_struct_mount(): + return self.mnt_parent != self.vol.offset + else: + return self._get_real_mnt().has_parent() + + def get_mnt_sb(self): + return self.mnt_sb + + def get_flags_access(self) -> str: + return "ro" if self.mnt_flags & mount.MNT_READONLY else "rw" + + def get_flags_opts(self) -> Iterable[str]: + flags = [ + mntflagtxt + for mntflag, mntflagtxt in mount.MNT_FLAGS.items() + if mntflag & self.mnt_flags != 0 + ] + return flags + + def get_mnt_flags(self): + return self.mnt_flags + + def is_shared(self) -> bool: + return self.get_mnt_flags() & mount.MNT_SHARED + + def is_unbindable(self) -> bool: + return self.get_mnt_flags() & mount.MNT_UNBINDABLE + + def is_slave(self) -> bool: + return self.mnt_master and self.mnt_master.vol.offset != 0 + + def get_devname(self) -> str: + return utility.pointer_to_string(self.mnt_devname, count=255) + class kobject(objects.StructType): def reference_count(self): @@ -817,7 +1136,6 @@ def reference_count(self): ret = refcnt.counter else: ret = refcnt.refs.counter - return ret @@ -825,7 +1143,7 @@ class mnt_namespace(objects.StructType): def get_inode(self): if self.has_member("proc_inum"): return self.proc_inum - elif self.ns.has_member("inum"): + elif self.has_member("ns") and self.ns.has_member("inum"): return self.ns.inum else: raise AttributeError("Unable to find mnt_namespace inode") @@ -836,7 +1154,6 @@ def get_mount_points(self): if not self._context.symbol_space.has_type(mnt_type): # Old kernels ~ 2.6 mnt_type = table_name + constants.BANG + "vfsmount" - for mount in self.list.to_list(mnt_type, "mnt_list"): yield mount @@ -844,10 +1161,13 @@ def get_mount_points(self): class net(objects.StructType): 
def get_inode(self): if self.has_member("proc_inum"): + # 3.8.13 <= kernel < 3.19.8 return self.proc_inum - elif self.ns.has_member("inum"): + elif self.has_member("ns") and self.ns.has_member("inum"): + # kernel >= 3.19.8 return self.ns.inum else: + # kernel < 3.8.13 raise AttributeError("Unable to find net_namespace inode") @@ -861,7 +1181,6 @@ def _get_vol_kernel(self): ) if not module_names: raise ValueError(f"No module using the symbol table {symbol_table}") - kernel_module_name = module_names[0] kernel = self._context.modules[kernel_module_name] return kernel @@ -871,7 +1190,6 @@ def get_inode(self): kernel = self._get_vol_kernel() except ValueError: return 0 - socket_alloc = linux.LinuxUtilities.container_of( self.vol.offset, "socket_alloc", "socket", kernel ) @@ -897,7 +1215,6 @@ def get_type(self): def get_inode(self): if not self.sk_socket: return 0 - return self.sk_socket.get_inode() def get_protocol(self): @@ -907,7 +1224,6 @@ def get_state(self): # Return the generic socket state if self.has_member("sk"): return self.sk.sk_socket.get_state() - return self.sk_socket.get_state() @@ -915,7 +1231,6 @@ class unix_sock(objects.StructType): def get_name(self): if not self.addr: return - sockaddr_un = self.addr.name.cast("sockaddr_un") saddr = str(utility.array_to_string(sockaddr_un.sun_path)) return saddr @@ -951,7 +1266,6 @@ def get_protocol(self): protocol = IP_PROTOCOLS.get(self.sk.sk_protocol) if self.get_family() == "AF_INET6": protocol = IPV6_PROTOCOLS.get(self.sk.sk_protocol, protocol) - return protocol def get_state(self): @@ -982,7 +1296,6 @@ def get_dst_port(self): dport_le = sk_common.skc_dport else: return - return socket_module.htons(dport_le) def get_src_addr(self): @@ -1001,7 +1314,6 @@ def get_src_addr(self): saddr = self.pinet6.saddr else: return - parent_layer = self._context.layers[self.vol.layer_name] try: addr_bytes = parent_layer.read(saddr.vol.offset, addr_size) @@ -1010,7 +1322,6 @@ def get_src_addr(self): f"Unable to read socket src address from {saddr.vol.offset:#x}" ) return - return socket_module.inet_ntop(family, addr_bytes) def get_dst_addr(self): @@ -1032,7 +1343,6 @@ def get_dst_addr(self): addr_size = 16 else: return - parent_layer = self._context.layers[self.vol.layer_name] try: addr_bytes = parent_layer.read(daddr.vol.offset, addr_size) @@ -1041,7 +1351,6 @@ def get_dst_addr(self): f"Unable to read socket dst address from {daddr.vol.offset:#x}" ) return - return socket_module.inet_ntop(family, addr_bytes) @@ -1055,6 +1364,26 @@ def get_state(self): # Return the generic socket state return self.sk.sk_socket.get_state() + def get_portid(self): + if self.has_member("pid"): + # kernel < 3.7.10 + return self.pid + if self.has_member("portid"): + # kernel >= 3.7.10 + return self.portid + else: + raise AttributeError("Unable to find a source port id") + + def get_dst_portid(self): + if self.has_member("dst_pid"): + # kernel < 3.7.10 + return self.dst_pid + if self.has_member("dst_portid"): + # kernel >= 3.7.10 + return self.dst_portid + else: + raise AttributeError("Unable to find a destination port id") + class vsock_sock(objects.StructType): def get_protocol(self): @@ -1101,3 +1430,154 @@ def get_protocol(self): def get_state(self): # xdp_sock.state is an enum return self.state.lookup() + + +class bpf_prog(objects.StructType): + def get_type(self): + # The program type was in `bpf_prog_aux::prog_type` from 3.18.140 to + # 4.1.52 before it was moved to `bpf_prog::type` + if self.has_member("type"): + # kernel >= 4.1.52 + return self.type + + if 
self.has_member("aux") and self.aux:
+            if self.aux.has_member("prog_type"):
+                # 3.18.140 <= kernel < 4.1.52
+                return self.aux.prog_type
+
+        # kernel < 3.18.140
+        raise AttributeError("Unable to find the BPF type")
+
+
+class cred(objects.StructType):
+    # struct cred was added in kernels 2.6.29
+    def _get_cred_int_value(self, member: str) -> int:
+        """Helper to obtain the right cred member value for the current kernel.
+
+        Args:
+            member (str): The requested cred member name to obtain its value
+
+        Raises:
+            AttributeError: When the requested cred member doesn't exist
+            AttributeError: When the cred implementation is not supported.
+
+        Returns:
+            int: The cred member value
+        """
+        if not self.has_member(member):
+            raise AttributeError(f"struct cred doesn't have a '{member}' member")
+
+        cred_val = self.member(member)
+        if hasattr(cred_val, "val"):
+            # From kernels 3.5.7 on it is a 'kuid_t' type
+            value = cred_val.val
+        elif isinstance(cred_val, objects.Integer):
+            # From at least 2.6.30 and until 3.5.7 it was a 'uid_t' type which was an 'unsigned int'
+            value = cred_val
+        else:
+            raise AttributeError("Kernel struct cred is not supported")
+
+        return int(value)
+
+    @property
+    def euid(self):
+        """Returns the effective user ID
+
+        Returns:
+            int: the effective user ID value
+        """
+        return self._get_cred_int_value("euid")
+
+
+class kernel_cap_struct(objects.StructType):
+    # struct kernel_cap_struct was added in kernels 2.5.0
+    @classmethod
+    def get_last_cap_value(cls) -> int:
+        """Returns the latest capability ID supported by the framework.
+
+        Returns:
+            int: The latest capability ID supported by the framework.
+        """
+        return len(CAPABILITIES) - 1
+
+    def get_kernel_cap_full(self) -> int:
+        """Return the maximum capability bitmask value allowed for this kernel
+
+        Returns:
+            int: The full capability bitmask allowed for this kernel
+        """
+        vmlinux = linux.LinuxUtilities.get_module_from_volobj_type(self._context, self)
+        try:
+            cap_last_cap = vmlinux.object_from_symbol(symbol_name="cap_last_cap")
+        except exceptions.SymbolError:
+            # It is likely a kernel < 3.2, so fall back to our list of capabilities
+            cap_last_cap = self.get_last_cap_value()
+
+        return (1 << cap_last_cap + 1) - 1
+
+    @classmethod
+    def capabilities_to_string(cls, capabilities_bitfield: int) -> List[str]:
+        """Translates a capability bitfield to a list of capability strings.
+
+        Args:
+            capabilities_bitfield (int): The capability bitfield value.
+
+        Returns:
+            List[str]: A list of capability strings.
+        """
+
+        capabilities = []
+        for bit, name in enumerate(CAPABILITIES):
+            if capabilities_bitfield & (1 << bit) != 0:
+                capabilities.append(name)
+
+        return capabilities
+
+    def get_capabilities(self) -> int:
+        """Returns the capability bitfield value
+
+        Returns:
+            int: The capability bitfield value.
+        """
+
+        if isinstance(self.cap, objects.Array):
+            # In 2.6.25.x <= kernels < 6.3, kernel_cap_struct::cap is a
+            # two-element __u32 array that constitutes a 64-bit bitfield.
+            # Technically, it can also be an array of 1 element if
+            # _KERNEL_CAPABILITY_U32S = _LINUX_CAPABILITY_U32S_1
+            # However, in the source code, that never happens.
+            # From 2.6.24 to 2.6.25 cap became an array of 2 elements.
+            cap_value = (self.cap[1] << 32) | self.cap[0]
+        else:
+            # In kernels < 2.6.25.x kernel_cap_struct::cap was a __u32
+            # In kernels >= 6.3 kernel_cap_struct::cap is a u64
+            cap_value = self.cap
+
+        return cap_value & self.get_kernel_cap_full()
+
+    def enumerate_capabilities(self) -> List[str]:
+        """Returns the list of capability strings.
diff --git a/volatility3/framework/symbols/linux/xen.json b/volatility3/framework/symbols/linux/xen.json
new file mode 100644
index 0000000000..8e843e7288
--- /dev/null
+++ b/volatility3/framework/symbols/linux/xen.json
@@ -0,0 +1,115 @@
+{
+  "symbols": {
+  },
+  "user_types": {
+    "xen_p2m": {
+      "fields":{
+        "entries": {
+          "offset": 0,
+          "type": {
+            "count": 1,
+            "kind": "array",
+            "subtype": {
+              "kind": "base",
+              "name": "unsigned long long"
+            }
+          }
+        }
+      },
+      "kind": "struct",
+      "size": 8
+    },
+    "xen_pfn":{
+      "fields":{
+        "entries": {
+          "offset": 0,
+          "type": {
+            "count": 1,
+            "kind": "array",
+            "subtype": {
+              "kind": "base",
+              "name": "unsigned long long"
+            }
+          }
+        }
+      },
+      "kind": "struct",
+      "size": 16
+    },
+    "xen_pfn_entry":{
+      "fields":{
+        "pfn":{
+          "offset": 0,
+          "type": {
+            "kind": "base",
+            "name": "unsigned long long"
+          }
+        },
+        "gmfn":{
+          "offset": 8,
+          "type": {
+            "kind": "base",
+            "name": "unsigned long long"
+          }
+        }
+      },
+      "kind": "struct",
+      "size": 16
+
+    }
+  },
+  "enums": {
+  },
+  "base_types": {
+    "unsigned char": {
+      "endian": "little",
+      "kind": "char",
+      "signed": false,
+      "size": 1
+    },
+    "unsigned short": {
+      "endian": "little",
+      "kind": "int",
+      "signed": false,
+      "size": 2
+    },
+    "long": {
+      "endian": "little",
+      "kind": "int",
+      "signed": true,
+      "size": 4
+    },
+    "char": {
+      "endian": "little",
+      "kind": "char",
+      "signed": true,
+      "size": 1
+    },
+    "unsigned long": {
+      "endian": "little",
+      "kind": "int",
+      "signed": false,
+      "size": 4
+    },
+    "long long": {
+      "endian": "little",
+      "kind": "int",
+      "signed": true,
+      "size": 8
+    },
+    "unsigned long long": {
+      "endian": "little",
+      "kind": "int",
+      "signed": false,
+      "size": 8
+    }
+  },
+  "metadata": {
+    "producer": {
+      "version": "0.0.1",
+      "name": "ikelos-by-hand",
+      "datetime": "2023-01-09T00:51:00"
+    },
+    "format": "6.1.0"
+  }
+}
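The `xen_pfn_entry` type declared in xen.json above is a 16-byte record with `pfn` at offset 0 and `gmfn` at offset 8, both little-endian `unsigned long long`. A standalone illustration of that layout using plain `struct` unpacking (the buffer contents are made up):

    import struct

    # 16-byte xen_pfn_entry: pfn at offset 0, gmfn at offset 8, little-endian u64s.
    raw = bytes.fromhex("01000000000000000900000000000000")  # pfn=1, gmfn=9 (made-up values)
    pfn, gmfn = struct.unpack("<QQ", raw)
    assert (pfn, gmfn) == (1, 9)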
diff --git a/volatility3/framework/symbols/windows/extensions/pool.py b/volatility3/framework/symbols/windows/extensions/pool.py
index ac7f36a997..b761ddad88 100644
--- a/volatility3/framework/symbols/windows/extensions/pool.py
+++ b/volatility3/framework/symbols/windows/extensions/pool.py
@@ -397,12 +397,9 @@ def NameInfo(self) -> interfaces.objects.ObjectInterface:
 
         symbol_table_name = self.vol.type_name.split(constants.BANG)[0]
 
-        try:
-            header_offset = self.NameInfoOffset
-        except AttributeError:
-            # http://codemachine.com/article_objectheader.html (Windows 7 and later)
-            name_info_bit = 0x2
-
+        if symbol_table_name in self._context.modules:
+            ntkrnlmp = self._context.modules[symbol_table_name]
+        else:
             layer = self._context.layers[self.vol.native_layer_name]
             kvo = layer.config.get("kernel_virtual_offset", None)
 
@@ -411,16 +408,24 @@ def NameInfo(self) -> interfaces.objects.ObjectInterface:
                     f"Could not find kernel_virtual_offset for layer: {self.vol.layer_name}"
                 )
 
+            # We checked earlier that no module by this symbol table name exists, so construct one here
             ntkrnlmp = self._context.module(
                 symbol_table_name, layer_name=self.vol.layer_name, offset=kvo
             )
+
+        try:
+            header_offset = self.NameInfoOffset
+        except AttributeError:
+            # http://codemachine.com/article_objectheader.html (Windows 7 and later)
+            name_info_bit = 0x2
+
             address = ntkrnlmp.get_symbol("ObpInfoMaskToOffset").address
             calculated_index = self.InfoMask & (name_info_bit | (name_info_bit - 1))
 
-            header_offset = self._context.object(
-                symbol_table_name + constants.BANG + "unsigned char",
+            header_offset = ntkrnlmp.object(
+                "unsigned char",
                 layer_name=self.vol.native_layer_name,
-                offset=kvo + address + calculated_index,
+                offset=address + calculated_index,
             )
 
         if header_offset == 0:
@@ -430,10 +435,11 @@ def NameInfo(self) -> interfaces.objects.ObjectInterface:
                 )
             )
 
-        header = self._context.object(
-            symbol_table_name + constants.BANG + "_OBJECT_HEADER_NAME_INFO",
+        header = ntkrnlmp.object(
+            "_OBJECT_HEADER_NAME_INFO",
             layer_name=self.vol.layer_name,
             offset=self.vol.offset - header_offset,
             native_layer_name=self.vol.native_layer_name,
+            absolute=True,
         )
         return header
diff --git a/volatility3/plugins/windows/statistics.py b/volatility3/plugins/windows/statistics.py
index e921b35654..9915312e39 100644
--- a/volatility3/plugins/windows/statistics.py
+++ b/volatility3/plugins/windows/statistics.py
@@ -13,6 +13,8 @@
 
 
 class Statistics(plugins.PluginInterface):
+    """Lists statistics about the memory space."""
+
     _required_framework_version = (2, 0, 0)
 
     @classmethod
diff --git a/volshell.py b/volshell.py
old mode 100644
new mode 100755
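The pool.py change above moves `NameInfo` from raw `context.object()` calls onto the kernel module, passing `absolute=True` so the supplied offset is treated as an absolute address rather than one relative to the module's base. A hedged sketch of that call pattern, where `ntkrnlmp` is a kernel module object and `object_offset` is already a full virtual address (both names are hypothetical here):

    # Sketch only, not part of the patch: ntkrnlmp and object_offset are hypothetical.
    name_info = ntkrnlmp.object(
        "_OBJECT_HEADER_NAME_INFO",
        offset=object_offset,  # already an absolute address ...
        absolute=True,         # ... so it is not rebased against the module offset
    )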