diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml index 4f2653c2e..4e2604ada 100644 --- a/.github/workflows/cmake.yml +++ b/.github/workflows/cmake.yml @@ -24,6 +24,13 @@ jobs: steps: - uses: actions/checkout@v3.3.0 + - name: Cache media + uses: actions/cache@v3.2.3 + with: + path: | + ${{github.workspace}}/mediarepo + key: dev-media + - name: Install tools shell: bash run: | @@ -65,12 +72,6 @@ jobs: mkdir -p ~/install/usr cmake --preset $PROFILE_CONAN . -DCMAKE_INSTALL_PREFIX=~/install/usr - # Disabled for now since we don't generate any package. - # - name: Download media - # working-directory: ${{github.workspace}}/build - # shell: bash - # run: cmake --build . --preset $PROFILE_CONAN --target assets-download - - name: Build shell: bash run: cmake --build --preset $PROFILE_CONAN @@ -83,6 +84,16 @@ jobs: shell: bash run: cmake --build --preset $PROFILE_CONAN --target install + - name: Download media + working-directory: ${{github.workspace}}/build + shell: bash + run: cmake --build . --preset $PROFILE_CONAN --target mediarepo-checkout + + - name: Process and install media + working-directory: ${{github.workspace}}/build + shell: bash + run: cmake --build . --preset $PROFILE_CONAN --target media-process-install + - name: Build Snap if: env.BUILD_SNAP == 'true' shell: bash diff --git a/CMakeLists.txt b/CMakeLists.txt index f82d034f0..84a74aa62 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -143,6 +143,27 @@ else (SUBVERSION_CMD) ) endif (SUBVERSION_CMD) +set(MEDIAREPO_PROCESSED_DIR ${CMAKE_INSTALL_PREFIX}/share/cyphesis/assets) +if (NOT MEDIA_PROCESS_TEXTURE_MAX_SIZE) + set(MEDIA_PROCESS_TEXTURE_MAX_SIZE 512) +endif () + +add_custom_target(media-process-install) +if (Python3_EXECUTABLE) + add_custom_command( + TARGET media-process-install + COMMAND ${CMAKE_COMMAND} -E echo "Processing media in '${MEDIAREPO_DIR}/trunk' and placing it in '${MEDIAREPO_PROCESSED_DIR}'." 
+          COMMAND ${CMAKE_COMMAND} -E echo "NOTE: this will make Cyphesis use the processed media in '${MEDIAREPO_PROCESSED_DIR}' instead of the raw media in '${MEDIAREPO_DIR}/trunk'."
+          COMMAND ${Python3_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tools/scripts/make_dist_media.py ${MEDIAREPO_DIR}/trunk ${MEDIAREPO_PROCESSED_DIR} ${MEDIA_PROCESS_TEXTURE_MAX_SIZE}
+    )
+else (Python3_EXECUTABLE)
+    add_custom_command(
+            TARGET media-process-install
+            COMMAND ${CMAKE_COMMAND} -E echo "Could not find the Python command 'python'. The target 'media-process-install' is disabled."
+    )
+endif (Python3_EXECUTABLE)
+
+
 ##TODO: check for binreloc?
 
 #We'll use xmllint for validating schemas of some of our xml files.
diff --git a/README.md b/README.md
index 77b31c838..94fa898d5 100644
--- a/README.md
+++ b/README.md
@@ -27,26 +27,15 @@ The simplest way to install all required dependencies is by using [Conan](https:
 conan remote add worldforge https://artifactory.ogenvik.org/artifactory/api/conan/conan
 conan install . --build missing
 cmake --preset conan-release -DCMAKE_INSTALL_PREFIX=./build/install/release
-cmake --build --preset conan-release -j --target all --target mediarepo-checkout --target install
+cmake --build --preset conan-release -j --target all
+cmake --build --preset conan-release -j --target mediarepo-checkout
+cmake --build --preset conan-release -j --target media-process-install
+cmake --build --preset conan-release -j --target install
 ```
-Alternatively you can use the [Hammer](http://wiki.worldforge.org/wiki/Hammer_Script "The Hammer script") tool.
-This is script provided by the Worldforge project which will download and install all of the required libraries and
-components used by Worldforge.
-
-Otherwise the server can most easily be built through the following commands.
-
-```
-mkdir build_`arch` && cd build_`arch`
-cmake ..
-make -make assets-download -make install -``` - -Note that you also probably want to install the defaults worlds from -[Worldforge Worlds](https://github.com/worldforge/worlds) (this is done automatically by the -Hammer build tool). +NOTE: The invocation of the target "media-process-install" is optional. It will go through the raw Subversion assets and +convert .png to .dds as well as scaling down textures. If you omit this step Cyphesis will instead use the raw +Subversion media. Which you might want if you're developing locally. ### Tests @@ -76,12 +65,14 @@ When editing the Python scripts that make up the rulesets it's a good idea to ad your IDE's Python include paths. This directory contains stubs generated from the C++ bindings, which makes things such as type lookup and code completion easier. -These stubs are auto generated from the C++ bindings through the custom target "GeneratePythonDocs". +These stubs are auto generated from the C++ bindings through the custom target "GeneratePythonDocs". Execute this target whenever you've done edits to the Python bindings. ## Dependencies -We use Conan for our dependency handling. If you're developing locally you can issue this command to setup both a "debug" and "release" environment. +We use Conan for our dependency handling. If you're developing locally you can issue this command to setup both a " +debug" and "release" environment. + ```bash conan install -s build_type=Debug . --build missing --update && conan install . 
--build missing --update ``` diff --git a/tools/scripts/make_dist_media.py b/tools/scripts/make_dist_media.py new file mode 100755 index 000000000..67843bcce --- /dev/null +++ b/tools/scripts/make_dist_media.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# Requires Python 3 or higher + +import fileinput +import fnmatch +import os +import re +import shutil +import subprocess + +import sys + + +def find_matches(files, patterns): + matches = [] + for pattern in patterns: + for file in fnmatch.filter(files, pattern): + matches.append(file) + return matches + + +def collect_licenses(src_assets_path, licenses, assets): + """Walk upwards looking for any license or author files""" + for asset in assets: + asset_path = os.path.join(src_assets_path, asset) + if os.path.isfile(asset_path): + asset_path_tokens = os.path.dirname(asset).split(os.sep) + while len(asset_path_tokens) > 0: + directory = os.sep.join(asset_path_tokens) + for file in find_matches(os.listdir(os.path.join(src_assets_path, directory)), + ["*license*", "*LICENSE*", "*authors*", "*AUTHORS*", "*COPYING*"]): + licenses.add(os.path.join(directory, file)) + del asset_path_tokens[-1] + + +def copy_assets(src_assets_path, dest_assets_path, assets, assets_no_conversion, image_max_size=None): + original_size = 0 + destination_size = 0 + copied = 0 + skipped = 0 + converted = 0 + errors = 0 + for asset in assets: + asset_path = os.path.join(src_assets_path, asset) + if os.path.isfile(asset_path): + + png_to_dds_conversion_needed = False + + dest_asset_path = os.path.join(dest_assets_path, asset) + + dest_asset_dir_path = os.path.dirname(dest_asset_path) + if not os.path.exists(dest_asset_dir_path): + os.makedirs(dest_asset_dir_path) + + if dest_asset_path.endswith(".png") and not asset.startswith( + "common/ui") and asset not in assets_no_conversion: + (dest_asset_path, _) = re.subn(r'.png$', '.dds', dest_asset_path) + png_to_dds_conversion_needed = True + + # Check if the destination file 
exists, and if so is it older than the source + if os.path.exists(dest_asset_path) and os.path.getmtime(dest_asset_path) >= os.path.getmtime(asset_path): + # destination file is newer or of the same date as the source file + skipped = skipped + 1 + else: + if asset_path.endswith((".png", ".dds", ".jpg")) and not asset.startswith("common/ui"): + compression_format = "" + + convert_cmd = ["convert"] + + if image_max_size: + convert_cmd.append('-resize "{0}x{0}>"'.format(image_max_size)) + + if png_to_dds_conversion_needed: + imagemetadata = subprocess.check_output(["file", asset_path]) + # TODO: use bc3n or bc1n for normal maps, with support in shaders + if "RGBA" in imagemetadata.decode("utf-8"): + compression_format = "dxt5" + else: + compression_format = "dxt1" + + convert_cmd.append('-define dds:compression={0}'.format(compression_format)) + + convert_cmd.append(asset_path) + convert_cmd.append(dest_asset_path) + + print("converting image asset {0} to {1} ({2})".format(asset, dest_asset_path, compression_format)) + if os.system(" ".join(convert_cmd)) == 0: + converted = converted + 1 + else: + errors = errors + 1 + else: + print("copying asset {0} to {1}".format(asset, dest_asset_path)) + shutil.copy(asset_path, dest_asset_path) + copied = copied + 1 + if asset.endswith(".material") and not asset.startswith("common/ui"): + print("processing material to replace 'png' with 'dds'") + for line in fileinput.input(dest_asset_path, inplace=True): + print(line.replace(".png", ".dds"), end='') + + original_size = original_size + os.path.getsize(asset_path) + destination_size = destination_size + os.path.getsize(dest_asset_path) + + else: + errors = errors + 1 + print("referenced file {0} does not exist".format(asset_path)) + + return copied, converted, skipped, errors, original_size, destination_size + + +def copytree(src, dst): + if not os.path.exists(dst): + os.makedirs(dst) + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if 
os.path.isdir(s): + # Skip "source" directories. + if item == "source": + continue + copytree(s, d) + else: + if not os.path.exists(d) or os.stat(s).st_mtime - os.stat(d).st_mtime > 1: + print("Copying " + s) + shutil.copy2(s, d) + + +def main(): + """The main entry point.""" + usage = """usage: {0} + +This script will automatically scan the media source repo and create a repository suitable for distributions. +This process mainly involves copying assets and resizing textures. + + +MEDIA_DIRECTORY_TRUNK is the path to the root of the media directory, named "trunk" + +OUTPUT_DIRECTORY is where the processed assets will be copied + +IMAGE_MAX_SIZE (optional) the max size in pixels of any image. Any larger image will be scaled + +OPTIONS + -h Show this help text +""".format(sys.argv[0]) + + if len(sys.argv) == 1: + print("ERROR: MEDIA_DIRECTORY_TRUNK must be specified!") + print(usage) + sys.exit(1) + elif len(sys.argv) == 2: + print("ERROR: OUTPUT_DIRECTORY must be specified!") + print(usage) + sys.exit(1) + + if sys.argv[1] == "-h": + print(usage) + sys.exit(0) + + src_media_dir = sys.argv[1] + # Some extra precaution. + if os.path.basename(src_media_dir) != "trunk": + print( + "The first parameter should be a directory names 'trunk', as that's the root of the Subversion media repository.") + sys.exit(1) + + src_assets_dir = os.path.join(src_media_dir, "assets") + dest_assets_dir = os.path.abspath(sys.argv[2]) + + if not os.path.exists(dest_assets_dir): + os.makedirs(dest_assets_dir) + + max_size = None + if len(sys.argv) > 3: + max_size = int(sys.argv[3]) + + # Copy license files + shutil.copy(os.path.join(src_media_dir, "LICENSING.txt"), dest_assets_dir) + shutil.copy(os.path.join(src_media_dir, "COPYING.txt"), dest_assets_dir) + + # Put all assets here, as paths relative to the "assets" directory in the media repo. + assets = set() + assets_no_conversion = set() + + # Copy all files found in the "assets" directory. Skip "source" directories. 
+ for dirpath, dirnames, files in os.walk(src_assets_dir): + if "source" in dirnames: + dirnames.remove("source") + for filename in files: + if filename.endswith(('.modeldef', '.entitymap', '.jpg', '.png', '.dds', '.skeleton', '.mesh', '.ttf', + '.material', '.program', '.cg', '.glsl', '.vert', '.frag', '.hlsl', '.overlay', + '.compositor', '.fontdef')): + assets.add(os.path.relpath(os.path.join(dirpath, filename), src_assets_dir)) + + licenses = set() + collect_licenses(src_assets_dir, licenses, assets) + + assets.update(licenses) + + copied, converted, skipped, errors, original_size, destination_size = copy_assets(src_assets_dir, dest_assets_dir, + assets, assets_no_conversion, + max_size) + print( + "Media conversion completed.\n{0} files copied, {1} images converted, {2} files skipped, {3} with errors".format( + copied, converted, skipped, errors)) + print("Original size: {0} MB".format(original_size / 1000000)) + print("Destination size: {0} MB".format(destination_size / 1000000)) + + pass + + +if __name__ == '__main__': + main()