diff --git a/.gitignore b/.gitignore
index 1621399..9438c46 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,7 @@
 simulator/
 __pycache__
+
+*.pyc
+.vscode/
+logs/*
diff --git a/Dockerfile-ppo b/Dockerfile-ppo
new file mode 100644
index 0000000..cf6f615
--- /dev/null
+++ b/Dockerfile-ppo
@@ -0,0 +1,35 @@
+###################################################################################################
+# DISTRIBUTION STATEMENT A. Approved for public release. Distribution is unlimited.
+#
+# This material is based upon work supported by the Under Secretary of Defense for Research and
+# Engineering under Air Force Contract No. FA8702-15-D-0001. Any opinions, findings, conclusions
+# or recommendations expressed in this material are those of the author(s) and do not necessarily
+# reflect the views of the Under Secretary of Defense for Research and Engineering.
+#
+# (c) 2020 Massachusetts Institute of Technology.
+#
+# MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the contractor (May 2014)
+#
+# The software/firmware is provided to you on an As-Is basis
+#
+# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013
+# or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work
+# are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other
+# than as specifically authorized by the U.S. Government may violate any copyrights that exist in
+# this work.
+###################################################################################################
+
+FROM goseek-base:latest
+
+RUN apt-get update && \
+    apt-get install python-opencv -y && \
+    pip install stable-baselines && \
+    conda install tensorflow-gpu==1.13.1
+
+WORKDIR /goseek-challenge
+
+COPY baselines/agents.py baselines/agents.py
+
+COPY baselines/config/ppo-agent.yaml agent.yaml
+
+COPY ppo-weights.pkl ppo-weights.pkl
diff --git a/Instructions.md b/Instructions.md
index 95bc9ea..54afc69 100644
--- a/Instructions.md
+++ b/Instructions.md
@@ -46,7 +46,7 @@ Note that we recommend installing in [development mode](https://setuptools.readt
 See [below](#training) for further discussion.
 
-2. Clone this repository and install requirements.
+3. Clone this repository and install requirements.
 
 ```sh
 git clone https://github.com/MIT-TESSE/goseek-challenge.git
 cd goseek-challenge
 ```
 
-3. Next, you need to obtain GOSEEK simulator. Execute the following:
+4. Next, you need to obtain the GOSEEK simulator. Execute the following:
 
 ```sh
 mkdir -p simulator
-wget https://github.com/MIT-TESSE/goseek-challenge/releases/download/0.1.0/goseek-v0.1.0.zip -P simulator
-unzip simulator/goseek-v0.1.0.zip -d simulator
-chmod +x simulator/goseek-v0.1.0.x86_64
+wget https://github.com/MIT-TESSE/goseek-challenge/releases/download/0.1.0/goseek-v0.1.3.zip -P simulator
+unzip simulator/goseek-v0.1.3.zip -d simulator
+chmod +x simulator/goseek-v0.1.3.x86_64
 ```
 
 This creates a new `simulator` folder, downloads and unzips the simulator to that folder, and makes the simulator executable.
-Note that if you choose to place the simulator in an alternative location, you will need to specify the location in a configuration file that overrides the default value such as in [config/ground-truth.yaml](config/ground-truth.yaml).
+Note that if you choose to place the simulator in an alternative location, you will need to specify the location in a configuration file that overrides the default value, such as in [config/check-ground-truth.yaml](config/check-ground-truth.yaml).
 
-4. Test your installation by running a random agent. The agent receives observations and takes random actions:
+5. Test your installation by running a random agent. The agent receives observations and takes random actions:
 
 ```sh
-python eval.py --agent-config baselines/config/random-agent.yaml
+python eval.py --agent-config baselines/config/random-agent.yaml --episode-config config/check-ground-truth.yaml
 ```
 
-5. Next, build a docker image called `goseek-base`, which is needed to submit online solutions.
+6. Next, build a docker image called `goseek-base`, which is needed to submit online solutions.
 
 ```sh
 cd docker/goseek-base/
@@ -85,7 +85,29 @@ docker run --rm -it goseek-base /bin/bash -c "python eval.py --help"
 ```
 
-__NOTE__: In order to run the __Perception Pipeline__, you will need another docker image with [Kimera](https://github.com/MIT-SPARK/Kimera). Directions for building this image (named `goseek-kimera`) will be posted at a later time.
+7. In order to run the __Perception Pipeline__, you will need another docker image with [Kimera](https://github.com/MIT-SPARK/Kimera). Directions for building this image (named `goseek-kimera`) are as follows.
+Please note that building this image is not required to submit a solution for the competition; it is only needed for local testing and/or training with the __Perception Pipeline__.
+We also strongly encourage participants to review the [further details](doc/perception-pipeline.md) on the perception pipeline.
+
+    a. First, obtain [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-download).
+    Note that you'll need to create an Nvidia account and manually download it because TensorRT is proprietary.
+    Select `TensorRT-6.0.1.5.Ubuntu-18.04.x86_64-gnu.cuda-10.0.cudnn7.6.tar.gz` from the Nvidia TensorRT
+    download page and **place the binary in `goseek-challenge/docker/goseek-kimera/`**.
+
+    b. Build the docker image.
+    Note that this takes quite a while.
+    (Go watch a movie? :man_shrugging:)
+
+    ```sh
+    docker build --rm -t goseek-kimera .
+    ```
+
+    c. Optionally, run the following to verify the docker image.
+    It should list the directory contents of [tesse_gym_bridge](https://www.github.com/MIT-TESSE/tesse-gym-bridge).
+
+    ```sh
+    docker run --rm -it goseek-kimera /bin/bash -c "source /catkin_ws/devel/setup.bash && rosls tesse_gym_bridge"
+    ```
 
 ## Usage
@@ -126,7 +148,7 @@ class Agent:
 (1, 240, 320, 3)
 >>> segmentation.shape
 (1, 240, 320, 3)
->>> rgb.shape
+>>> depth.shape
 (1, 240, 320, 3)
 >>> pose.shape
 (1, 3)
@@ -214,17 +236,31 @@ optional arguments:
 
 For example, you can run the following to test against the __Ground Truth__ data source:
 
 ```sh
-python test_locally.py -s simulator/goseek-v0.1.0.x86_64 -i submission -g
+python test_locally.py -s simulator/goseek-v0.1.3.x86_64 -i submission -g
 ```
 
+#### Submitting docker image
+
+As mentioned in the [competition overview](README.md), we are using the [EvalAI](https://evalai.cloudcv.org/) platform to host our submission server. To upload a solution, please follow the instructions below:
+
+1. Install [EvalAI-CLI](https://evalai-cli.cloudcv.org/): `pip install evalai`.
+
+2.
Create an account on EvalAI's [website](https://evalai.cloudcv.org/) and sign up for the [GOSEEK-Challenge](https://evalai.cloudcv.org/web/challenges/challenge-page/607/overview).
+
+3. Follow the instructions on the [submission](https://evalai.cloudcv.org/web/challenges/challenge-page/607/submission) tab to push your docker image.
+Note that we've provided four phases -- some to support development.
+Only the leader of the **Competition Phase with Perception Pipeline** will be declared the competition winner.
 
 ## Examples
+
 See any of the following for additional information and examples.
 
 - [Baseline Proximal Policy Optimization (PPO)](doc/ppo-baseline.md)
 - [Additional problem details](doc/details.md)
-
+- [Instructions for running headless on a Linux server](https://github.com/MIT-TESSE/tesse-core#running-tesse-headless)
+- [Generating configuration files](doc/generate.md)
+- [Details on the Perception Pipeline](doc/perception-pipeline.md)
 
 ## Disclaimer
diff --git a/README.md b/README.md
index 293f1b8..6937100 100644
--- a/README.md
+++ b/README.md
@@ -69,8 +69,15 @@ The current timeline for the competition is as follows:
 Over the course of the competition, any important announcements or updates will be listed in this section. We recommend that you follow this repository to be alerted to these announcements.
 
-1. We've delayed the opening of submissions in light of [ICRA 2020](https://icra2020.org/)'s status, and will continue to track its status.
-We do not anticipate that any future statements from the planning committee will change the current timeline of this challenge.
+1. We've posted version 0.1.3 of the challenge simulator.
+This provides better support for the __Perception Pipeline__ and addresses a minor bug.
+Please download the new simulator if you were using 0.1.0 before.
+The link can be found in the [instructions](Instructions.md).
+2. We've also updated [tesse-gym](https://github.com/MIT-TESSE/tesse-gym) since our initial release to support the __Perception Pipeline__.
+You should update your clone of `tesse-gym` following the [instructions](Instructions.md).
+Please also rebuild the `goseek-base` docker image, as well as any of your submission images.
+3. We continue to track the status of [ICRA 2020](https://icra2020.org/).
+We do not anticipate that any future statements from the planning committee will change the timeline of this challenge.
 
 ## Getting Started
@@ -102,14 +109,22 @@ Use `test_locally.py` for local testing.
 Assume you've named your docker image `submission` as above, then evaluate your agent with __Ground Truth__ data as follows.
 
 ```sh
-python test_locally.py -s simulator/goseek-v0.1.0.x86_64 -i submission -g
+python test_locally.py -s simulator/goseek-v0.1.3.x86_64 -i submission -g
 ```
 
-__NOTE__: Instructions for testing your agent with the __Perception Pipeline__ will be posted shortly.
+Similarly, evaluate your agent with __Perception Pipeline__ data as follows.
 
+```sh
+python test_locally.py -s simulator/goseek-v0.1.3.x86_64 -i submission -p
+```
+
+### Submit online
 
-### Submit online.
+1. Install [EvalAI-CLI](https://evalai-cli.cloudcv.org/): `pip install evalai`.
 
-__NOTE__: Instructions for submitting agents online will be available according to the competition timeline [above](#timeline).
+2. Create an account on EvalAI's [website](https://evalai.cloudcv.org/) and sign up for the [GOSEEK-Challenge](https://evalai.cloudcv.org/web/challenges/challenge-page/607/overview).
+3.
Follow the instructions on the [submission](https://evalai.cloudcv.org/web/challenges/challenge-page/607/submission) tab to push your docker image.
+Note that we've provided four phases -- some to support development.
+Only the leader of the **Competition Phase with Perception Pipeline** will be declared the competition winner.
 
 ## Acknowledgements
diff --git a/baselines/config/ppo-agent.yaml b/baselines/config/ppo-agent.yaml
index 7ce50bf..79609be 100644
--- a/baselines/config/ppo-agent.yaml
+++ b/baselines/config/ppo-agent.yaml
@@ -1,2 +1,2 @@
 name: StableBaselinesPPO
-weights: '' # MODEL WEIGHT PATH
+weights: 'ppo-weights.pkl' # PUT WEIGHTS AT GOSEEK-CHALLENGE ROOT
diff --git a/config/check-ground-truth.yaml b/config/check-ground-truth.yaml
new file mode 100644
index 0000000..09e92d3
--- /dev/null
+++ b/config/check-ground-truth.yaml
@@ -0,0 +1,8 @@
+ENV:
+  sim_path: 'simulator/goseek-v0.1.3.x86_64'
+
+EPISODE:
+  scenes: [3, 5]
+  n_targets: [30, 30]
+  episode_length: [400, 400]
+  random_seeds: [10, 100]
diff --git a/config/ground-truth.yaml b/config/ground-truth.yaml
index 3be0b8b..cf28875 100644
--- a/config/ground-truth.yaml
+++ b/config/ground-truth.yaml
@@ -5,4 +5,4 @@ EPISODE:
   scenes: [3, 5]
   n_targets: [30, 30]
   episode_length: [400, 400]
-  random_seeds: [10, 100]
\ No newline at end of file
+  random_seeds: [10, 100]
diff --git a/config/perception-pipeline.yaml b/config/perception-pipeline.yaml
index cbfe9ef..1f919a0 100644
--- a/config/perception-pipeline.yaml
+++ b/config/perception-pipeline.yaml
@@ -5,7 +5,7 @@ ENV:
   image_port: 9008
 
 EPISODE:
-  scenes: [3, 5]
-  n_targets: [30, 30]
-  episode_length: [400, 400]
-  random_seeds: [10, 100]
\ No newline at end of file
+  scenes: [3, 4, 5, 5]
+  n_targets: [30, 30, 30, 30]
+  episode_length: [400, 400, 400, 400]
+  random_seeds: [10, 1, 5, 6]
diff --git a/doc/details.md b/doc/details.md
index 2236204..eabb351 100644
--- a/doc/details.md
+++ b/doc/details.md
@@ -23,7 +23,7 @@ They don't spawn in other rooms (e.g., restroom, breakroom).
 
 ### Semantics
 
-There are 10 semantic categories for objects as follows.
+There are 11 semantic categories for objects as follows.
 
 - 0: floor (0,171,143)
 - 1: ceiling (1,155,211)
 - 2: wall (2,222,110)
@@ -52,4 +52,4 @@ MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the contra
 
 The software/firmware is provided to you on an As-Is basis
 
-Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than as specifically authorized by the U.S. Government may violate any copyrights that exist in this work.
\ No newline at end of file
+Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than as specifically authorized by the U.S. Government may violate any copyrights that exist in this work.
diff --git a/doc/generate.md b/doc/generate.md
new file mode 100644
index 0000000..af7657f
--- /dev/null
+++ b/doc/generate.md
@@ -0,0 +1,26 @@
+# Generating configuration files
+
+The following example generates a configuration file with 10 episodes such as those in [../config](../config).
+If you find this useful, please adapt it for your own use.
+
+```py
+import random
+import yaml
+
+k = 10  # number of episodes to generate
+l = 400  # episode length
+f = 30  # number of fruit
+scenes = [1, 2, 3, 4, 5]  # scenes (1 through 5 are provided)
+
+data = {}
+data['ENV'] = {'sim_path': ''}
+
+data['EPISODE'] = {'scenes': random.choices(scenes, k=k),
+                   'n_targets': [f for _ in range(k)],
+                   'episode_length': [l for _ in range(k)],
+                   'random_seeds': [random.randint(1, int(1e6)) for _ in range(k)]
+                   }
+
+with open('example.yaml', 'w') as outfile:
+    yaml.dump(data, outfile)
+```
\ No newline at end of file
diff --git a/doc/images/depth_estimate.png b/doc/images/depth_estimate.png
new file mode 100644
index 0000000..e46448f
Binary files /dev/null and b/doc/images/depth_estimate.png differ
diff --git a/doc/images/depth_ground_truth.png b/doc/images/depth_ground_truth.png
new file mode 100644
index 0000000..a9cb0ad
Binary files /dev/null and b/doc/images/depth_ground_truth.png differ
diff --git a/doc/images/gt_est_pose.png b/doc/images/gt_est_pose.png
new file mode 100755
index 0000000..15937a0
Binary files /dev/null and b/doc/images/gt_est_pose.png differ
diff --git a/doc/images/gt_pose.png b/doc/images/gt_pose.png
new file mode 100755
index 0000000..9f57248
Binary files /dev/null and b/doc/images/gt_pose.png differ
diff --git a/doc/images/rgb_estimate.png b/doc/images/rgb_estimate.png
new file mode 100644
index 0000000..529a6c7
Binary files /dev/null and b/doc/images/rgb_estimate.png differ
diff --git a/doc/images/rgb_ground_truth.png b/doc/images/rgb_ground_truth.png
new file mode 100644
index 0000000..529a6c7
Binary files /dev/null and b/doc/images/rgb_ground_truth.png differ
diff --git a/doc/images/segmentation_estimate.png b/doc/images/segmentation_estimate.png
new file mode 100644
index 0000000..8277d76
Binary files /dev/null and b/doc/images/segmentation_estimate.png differ
diff --git a/doc/images/segmentation_ground_truth.png b/doc/images/segmentation_ground_truth.png
new file mode 100644
index 0000000..ded7e06
Binary files /dev/null and b/doc/images/segmentation_ground_truth.png differ
diff --git a/doc/images/unity_coords.png b/doc/images/unity_coords.png
new file mode 100644
index 0000000..c0b7384
Binary files /dev/null and b/doc/images/unity_coords.png differ
diff --git a/doc/perception-pipeline.md b/doc/perception-pipeline.md
new file mode 100644
index 0000000..d04e85d
--- /dev/null
+++ b/doc/perception-pipeline.md
@@ -0,0 +1,97 @@
+# Perception Pipeline
+
+As stated in the challenge overview, we provide two data sources: one is the simulator **Ground Truth** and the other is a realistic **Perception Pipeline**.
+
+1. __Ground Truth__: The agent observes ground truth (i.e., error-free) information that is provided directly by the simulator.
+2. __Perception Pipeline__: The agent observes the output of [Kimera](http://web.mit.edu/sparklab/2019/10/13/Kimera__an_Open-Source_Library_for_Real-Time_Metric-Semantic_Localization_and_Mapping.html), which is an open-source C++ library for real-time metric-semantic visual-inertial Simultaneous Localization And Mapping (SLAM).
+Note that the types (and dimensions) of observations provided are the same as before; however, the error characteristics are now representative of a real perception system.
+
+This page gives an overview of [Data Conventions](#data-conventions) and the [Perception Pipeline Algorithms](#perception-pipeline-algorithms).
+
+## Data Conventions
+
+The agent observes several data products: monocular RGB, semantic segmentation, depth, and agent pose.
+Conventions related to these data products are as follows.
+
+### Semantic Segmentation
+
+See the [details page](https://github.mit.edu/TESS/goseek-challenge/blob/perception-docs/doc/details.md#semantics) for a list of the semantic segmentation classes and corresponding RGB values. Note that the class index corresponds to the value of the first color channel.
+
+### Depth
+
+Depth is rendered within a specified range, known as [clip planes](https://docs.unity3d.com/Manual/class-Camera.html), and then mapped to the range `[0, 1]`. The simulator uses minimum and maximum clip planes of `0.05` and `50`, so to recover depth in meters, multiply the provided image by `50`. Note that depth beyond 50 meters is truncated. In the [evaluation interface](https://github.mit.edu/TESS/goseek-challenge/blob/master/Instructions.md#local-evaluation) this would look like:
+
+```python
+import numpy as np
+
+from tesse_gym.tasks.goseek import decode_observations
+
+
+class Agent:
+    """ Interface for submitting an agent for evaluation. """
+
+    def act(self, observation: np.ndarray) -> int:
+        far_clip_plane = 50
+        rgb, segmentation, depth, pose = decode_observations(observation)
+        depth *= far_clip_plane  # convert depth to meters
+        ...
+```
+
+### Coordinate System for Poses
+
+We use the left-handed coordinate system native to Unity (see the [Unity documentation](https://docs.unity3d.com/560/Documentation/Manual/Transforms.html)). For world coordinates, the x- and z-axes are aligned with the horizontal plane, and the y-axis points up. Pose is given as the vector `(x, z, yaw)`, where the z-axis points forward, the x-axis is positive to the right, and yaw is measured about the up (y) axis. A minimal sketch of working with these conventions is provided at the end of this page.
+
+![](images/unity_coords.png)
+
+
+
+## Perception Pipeline Algorithms
+
+During evaluation, participants will be provided with realistic perception data from [Kimera](https://github.com/MIT-SPARK/Kimera), an open-source C++ library for real-time metric-semantic visual-inertial Simultaneous Localization And Mapping (SLAM). Realistic perception estimates are obtained by passing ground truth simulator data through this pipeline. Thus, the types and dimensions of observations will remain the same; however, the error characteristics are now representative of a real perception system.
+
+Please note that running the perception pipeline requires significantly more computation than the ground truth source.
+Episodes will run **several times slower** than when running the ground truth data pipeline.
+The simulator is run in a continuous dynamics mode (compared to discrete dynamics when running the ground truth data pipeline), and it outputs imagery at a higher rate.
+This higher-rate data is needed only for estimating [pose](#pose), so agent policies will still receive data at the same rate as before.
+In addition, several perception algorithms are now running, as described below.
+
+We recommend thinking carefully about how you use this pipeline.
+For example, it may be less feasible to generate data with it for policy training.
+
+### Segmentation
+
+A [U-Net](https://arxiv.org/pdf/1505.04597.pdf) provides segmentation estimates for the 11 GOSEEK [semantic classes](https://github.com/MIT-TESSE/goseek-challenge/blob/master/doc/details.md#semantics). The [segmentation-models.pytorch](https://github.com/qubvel/segmentation_models.pytorch) project was used to train the model on data collected from [scenes 1-4](https://github.com/MIT-TESSE/goseek-challenge/blob/master/doc/details.md#scenes) of the GOSEEK simulator.
Scene 5 was used to collect a validation set on which the model achieves an Intersection-over-Union (IoU) score of roughly 0.8. The model was then exported to an ONNX file, provided in this [release](https://github.com/MIT-TESSE/tesse-segmentation-ros/releases), for inference in [TensorRT](https://developer.nvidia.com/tensorrt).
+
+Training details can be found in this [notebook](https://github.com/MIT-TESSE/tesse-segmentation-ros/blob/master/training/train-segmentation-models.ipynb). The inference framework is implemented [here](https://github.com/MIT-TESSE/tesse-segmentation-ros/blob/master/src/tesse_segmentation_ros/models.py#L57).
+
+### Depth
+Depth is estimated via stereo reconstruction. Given a stereo image pair from the simulator, we use the ROS [`stereo_image_proc`](http://wiki.ros.org/stereo_image_proc) node to generate a point cloud, which is then projected into the camera plane to produce a depth image.
+
+
+### Pose
+Pose is provided by [Kimera-VIO](https://github.com/MIT-SPARK/Kimera-VIO), a visual-inertial odometry pipeline that estimates state from stereo and IMU data.
+
+Below is a comparison of the data provided in Ground Truth and Perception modes. Pose is illustrated over a 100-step trajectory, with carets marking position and heading at each step.
+
+
+| | Ground Truth | Perception |
+:----------------------:|:------------------------------------------:|:-------------------------:
+| Monocular RGB | ![](images/rgb_ground_truth.png) | ![](images/rgb_estimate.png)
+| Semantic Segmentation | ![](images/segmentation_ground_truth.png) | ![](images/segmentation_estimate.png)
+| Depth | ![](images/depth_ground_truth.png) | ![](images/depth_estimate.png)
+| Pose | ![](images/gt_pose.png) | ![](images/gt_est_pose.png)
+
+
+## Disclaimer
+
+DISTRIBUTION STATEMENT A. Approved for public release. Distribution is unlimited.
+
+This material is based upon work supported by the Under Secretary of Defense for Research and Engineering under Air Force Contract No. FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the Under Secretary of Defense for Research and Engineering.
+
+(c) 2020 Massachusetts Institute of Technology.
+
+MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the contractor (May 2014)
+
+The software/firmware is provided to you on an As-Is basis
+
+Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than as specifically authorized by the U.S. Government may violate any copyrights that exist in this work.
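+
+## Appendix: Working with Observations
+
+To tie the data conventions above together, here is a minimal sketch that unpacks a single observation. It is illustrative only: `decode_observations` is the interface shown in the Depth section, while the helper name `interpret` and the yaw reference direction (assumed here to be measured from the forward z-axis toward the x-axis) are assumptions to verify against the simulator.
+
+```python
+import numpy as np
+
+from tesse_gym.tasks.goseek import decode_observations
+
+FAR_CLIP_PLANE = 50  # simulator far clip plane, in meters
+
+
+def interpret(observation: np.ndarray):
+    """ Hypothetical helper: unpack one observation using this page's conventions. """
+    rgb, segmentation, depth, pose = decode_observations(observation)
+
+    # class indices live in the first color channel of the segmentation image
+    class_map = segmentation[..., 0].astype(int)
+
+    # depth arrives in [0, 1]; scale by the far clip plane to recover meters
+    depth_m = depth * FAR_CLIP_PLANE
+
+    # pose is (x, z, yaw) in Unity's left-handed frame; a planar heading vector,
+    # assuming yaw is measured from the +z (forward) axis toward +x
+    x, z, yaw = pose[0]
+    heading = np.array([np.sin(yaw), np.cos(yaw)])  # (x, z) components
+
+    return rgb, class_map, depth_m, (x, z), heading
+```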
+
+
diff --git a/docker/goseek-base/Dockerfile b/docker/goseek-base/Dockerfile
index 1d9e816..6cdb3a3 100644
--- a/docker/goseek-base/Dockerfile
+++ b/docker/goseek-base/Dockerfile
@@ -19,24 +19,21 @@
 RUN apt-get update && \
     apt-get install -y git curl && \
     apt-get clean
 
-RUN curl -o ~/miniconda.sh -O https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
-    chmod +x ~/miniconda.sh && \
-    ~/miniconda.sh -b -p /opt/conda && \
-    rm ~/miniconda.sh && \
-    /opt/conda/bin/conda clean -ya && \
-    /opt/conda/bin/conda create -y -n python37 python=3.7 numpy pip scipy pyyaml pillow
+RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
+    bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b && \
+    rm Miniconda3-latest-Linux-x86_64.sh
 
-RUN /opt/conda/bin/conda update -y -n base -c defaults conda
+ENV PATH /miniconda/bin:$PATH
 
-# this will pseudo-activate the conda environment for all subsequent build commands
-ENV PATH /opt/conda/envs/python37/bin:/opt/conda/envs/bin:$PATH
+RUN conda update -y conda && \
+    conda install -y -f python=3.7 numpy pip scipy pyyaml pillow
 
-RUN pip install git+https://github.com/MIT-TESSE/tesse-gym.git@master
+# TODO(ZR) clone master when needed changes are merged
+RUN pip install -e git+https://github.com/MIT-TESSE/tesse-gym.git@master#egg=tesse-gym
 
 RUN git clone https://github.com/MIT-TESSE/goseek-challenge.git /goseek-challenge
 
 WORKDIR /goseek-challenge
-
 #########################################################################################
 # DISTRIBUTION STATEMENT A. Approved for public release. Distribution is unlimited.
diff --git a/docker/goseek-kimera/Dockerfile b/docker/goseek-kimera/Dockerfile
new file mode 100644
index 0000000..284c211
--- /dev/null
+++ b/docker/goseek-kimera/Dockerfile
@@ -0,0 +1,220 @@
+FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
+# using devel to support installing pycuda
+# if you are behind a proxy: https://docs.docker.com/network/proxy/
+
+RUN apt-get clean && apt-get update && apt-get install -y locales
+RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && \
+    locale-gen
+ENV LC_ALL en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US.UTF-8
+ENV SHELL /bin/bash
+ENV DEBIAN_FRONTEND noninteractive
+
+# switch to bash within the container so ROS sourcing is easy in build commands
+SHELL ["/bin/bash", "-c"]
+
+RUN apt-get update && \
+    apt-get install -y curl bzip2 wget vim ffmpeg git tmux unzip
+
+# tesse: opencv dependency workaround from https://github.com/NVIDIA/nvidia-docker/issues/864
+RUN apt-get update && apt-get install -y libsm6 libxext6 libxrender-dev
+
+# kimera:
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends apt-utils
+
+################################
+### ROS ###
+################################
+
+# Set the ROS distro
+ENV ROS_DISTRO melodic
+
+# Add the ROS keys and package
+RUN apt-get update && \
+    apt-get install -y \
+    lsb-release \
+    gnupg
+RUN sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
+RUN mkdir ~/.gnupg
+RUN echo "disable-ipv6" >> ~/.gnupg/dirmngr.conf
+
+RUN curl -sSL 'http://keyserver.ubuntu.com/pks/lookup?op=get&search=0xC1CF6E31E6BADE8868B172B4F42ED6FBAB17C654' | apt-key add -
+
+# Install ROS
+RUN apt-get update && \
+    apt-get install -y ros-melodic-desktop-full
+
+RUN apt-get install -y \
+    python \
+    python-pip \
+    python-dev
+
+RUN apt-get install -y python-rosdep
+
+# Set up ROS
+RUN rosdep init
+RUN rosdep update
+
+RUN source /opt/ros/melodic/setup.bash
&& \ + apt install -y \ + python-rosinstall \ + python-rosinstall-generator \ + python-wstool build-essential \ + python-catkin-tools + +# Setup catkin workspace +RUN source /opt/ros/melodic/setup.bash && \ + mkdir -p /catkin_ws/src && \ + cd /catkin_ws/ && \ + catkin init && \ + # Change `melodic` to your ROS distro + catkin config --extend /opt/ros/melodic && \ + catkin config --cmake-args -DCMAKE_BUILD_TYPE=Release && \ + catkin config --merge-devel + + +################################ +### ** TESSE ** ### +### ------------ ### +### TESSE ROS packages ### +### TESSE Anaconda packages ### +################################ + +WORKDIR /catkin_ws/src + +# tesse-ros-bridge dependency +RUN pip install scipy + +RUN git clone https://github.com/MIT-TESSE/tesse-interface.git -b 0.1.3-SNAPSHOT && \ + cd tesse-interface && \ + python setup.py install && \ + cd .. + +WORKDIR /catkin_ws/src +RUN source /opt/ros/melodic/setup.bash +RUN wstool init + +RUN git clone https://github.com/MIT-TESSE/tesse-ros-bridge.git && \ + wstool merge -y tesse-ros-bridge/install/tesse_ros_bridge.rosinstall + +RUN git clone https://github.com/MIT-TESSE/tesse-segmentation-ros.git && \ + wstool merge -y tesse-segmentation-ros/install/tesse_segmentation_ros.rosinstall + +RUN git clone https://github.com/MIT-TESSE/tesse-gym-bridge.git && \ + wstool merge -y tesse-gym-bridge/install/tesse_gym_bridge.rosinstall + +RUN wstool update +RUN catkin build tesse_ros_bridge tesse_segmentation_ros tesse_gym_bridge + + +################################ +### **MIT-SPARK KIMERA** ### +### ---------------- ### +### KIMERA-VIO-ROS ### +### KIMERA-Semantics ### +################################ + +# pinned commits: +# https://github.com/MIT-SPARK/Kimera-VIO-ROS @ f6181a1370677ffe8c24253554efebaf787f428e +# https://github.com/MIT-SPARK/Kimera-VIO @ 42ad9ca557656ef3463b8cc3b841fe5a4a36398c +# https://github.com/MIT-SPARK/Kimera-Semantics @ 0ae759f24cce9e9ffc28d0495327a3d2b37d2c99 + +# Kimera dependencies +RUN apt-get update && \ + apt-get install -y \ + libgoogle-glog-dev \ + doxygen \ + cmake build-essential pkg-config autoconf \ + libboost-all-dev \ + libjpeg-dev libpng-dev libtiff-dev \ + libvtk6-dev libgtk-3-dev \ + libatlas-base-dev gfortran \ + libparmetis-dev \ + libtbb-dev + +# suppress build warnings +ENV PYTHONIOENCODING=UTF-8 + +WORKDIR /catkin_ws/src + +RUN git clone https://github.com/MIT-SPARK/Kimera-VIO-ROS.git && \ + cd Kimera-VIO-ROS && \ + git checkout f6181a1370677ffe8c24253554efebaf787f428e && \ + cd .. && \ + wstool merge -y Kimera-VIO-ROS/install/kimera_vio_ros_https.rosinstall && \ + wstool update + +RUN git clone https://github.com/MIT-SPARK/Kimera-Semantics.git && \ + cd Kimera-Semantics && \ + git checkout 0ae759f24cce9e9ffc28d0495327a3d2b37d2c99 && \ + cd .. && \ + wstool merge -y Kimera-Semantics/kimera/install/kimera_semantics_https.rosinstall && \ + wstool update + +# checkout pinned commit of Kimera-VIO-ROS and Kimera-VIO +RUN cd Kimera-VIO-ROS && \ + git checkout f6181a1370677ffe8c24253554efebaf787f428e && \ + cd .. && \ + cd Kimera-VIO && \ + git checkout 42ad9ca557656ef3463b8cc3b841fe5a4a36398c && \ + cd .. 
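+
+# gtsam is cloned during the wstool update above (via the Kimera rosinstall
+# files); Kimera-VIO builds against GTSAM's develop branch, so check it out
+# before running catkin build.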
+
+RUN cd gtsam && \
+    git fetch && \
+    git checkout develop && \
+    git pull
+
+RUN catkin build
+
+################################
+### Configs ###
+################################
+
+RUN wget https://github.com/MIT-TESSE/goseek-challenge/releases/download/evalai-beta-testing/goseek-unet-a1.onnx \
+    -P /catkin_ws/src/tesse-segmentation-ros/cfg/ && \
+    wget https://github.com/MIT-TESSE/goseek-challenge/releases/download/evalai-beta-testing/kimera-configs.zip && \
+    unzip kimera-configs.zip && \
+    mv kimera-configs/* /catkin_ws/src/Kimera-VIO/params/Tesse/ && \
+    rm kimera-configs.zip
+
+RUN source /catkin_ws/devel/setup.bash
+
+WORKDIR /
+
+RUN echo "source /catkin_ws/devel/setup.bash" >> run_perception.sh && \
+    echo "roslaunch tesse_gym_bridge run_goseek_perception.launch" >> run_perception.sh && \
+    chmod +x /run_perception.sh
+
+
+################################
+### TensorRT ###
+################################
+
+ADD TensorRT-6.0.1.5.Ubuntu-18.04.x86_64-gnu.cuda-10.0.cudnn7.6.tar.gz /
+RUN mv /TensorRT-6.0.1.5 /tensorrt
+
+ENV LD_LIBRARY_PATH $LD_LIBRARY_PATH:/tensorrt/lib
+
+# install tensorrt wheel into the python environment used with ROS
+RUN source /opt/ros/melodic/setup.bash && \
+    source /catkin_ws/devel/setup.bash && \
+    cd /tensorrt/python/ && \
+    pip install tensorrt-6.0.1.5-cp27-none-linux_x86_64.whl && \
+    pip install pycuda
+
+
+#########################################################################################
+# Disclaimer:
+# DISTRIBUTION STATEMENT A. Approved for public release. Distribution is unlimited.
+
+# This material is based upon work supported by the Under Secretary of Defense for Research and Engineering under Air Force Contract No. FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the Under Secretary of Defense for Research and Engineering.
+
+# © 2019 Massachusetts Institute of Technology.
+
+# MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the contractor (May 2014)
+
+# The software/firmware is provided to you on an As-Is basis
+
+# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than as specifically authorized by the U.S. Government may violate any copyrights that exist in this work.
diff --git a/logs/.gitkeep b/logs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/test_locally.py b/test_locally.py
index 416c5e8..a2446f2 100644
--- a/test_locally.py
+++ b/test_locally.py
@@ -20,9 +20,222 @@ ###################################################################################################
 import argparse
+import ast
+import hashlib
+import os
+import pprint
 import subprocess
 import time
-import os
+from datetime import datetime
+
+RECALL_WEIGHT = 1
+PRECISION_WEIGHT = 0.1
+COLLISIONS_WEIGHT = 0.1
+ACTIONS_WEIGHT = 0.1
+EPISODE_LENGTH = 400
+
+DOCKER_VIO_LOGDIR = "/catkin_ws/src/Kimera-VIO-ROS/output_logs"
+HOST_VIO_LOGDIR = "kimera_vio_logs"
+
+
+def _time_hash(prefix):
+    """ Get a hash based on the current time.
""" + hash = hashlib.sha1() + hash.update(str(time.time()).encode()) + return f"{prefix}_{hash.hexdigest()}" + + +def summarize(results): + n_episodes = len(results) - 1 + + metrics = {} + metrics["Recall"] = results["total"]["recall"] + metrics["Precision"] = results["total"]["precision"] + metrics["Collisions"] = ( + results["total"]["collisions"] / n_episodes + ) # results has total collisions + metrics["Actions"] = ( + results["total"]["steps"] / n_episodes + ) # results has total steps + + metrics["Weighted Total"] = ( + RECALL_WEIGHT * metrics["Recall"] + + PRECISION_WEIGHT * metrics["Precision"] + - COLLISIONS_WEIGHT * metrics["Collisions"] / EPISODE_LENGTH + - ACTIONS_WEIGHT * metrics["Actions"] / EPISODE_LENGTH + ) + return metrics + + +def start_perception_subprocess(image_name): + """ Start docker container with perception server as subprocess. """ + container_name = _time_hash("goseek_perception") + return ( + subprocess.Popen( + [ + "docker", + "run", + "--rm", + "--name", + f"{container_name}", + "--network=host", + "--gpus", + "all", + "-t", + f"{image_name}", + "/bin/bash", + "-c", + "source /catkin_ws/devel/setup.bash && " + "roslaunch tesse_gym_bridge run_goseek_perception.launch", + ], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ), + container_name, + ) + + +def start_submission_subprocess(config_dir, submission_image, episode_config_file): + submission_name = _time_hash("submission") + return ( + subprocess.Popen( + [ + "docker", + "run", + "--name", + f"{submission_name}", + "--network=host", + "--gpus", + "all", + "-v", + # note to goseek-challenge participants: + # we will use an internal config for our challenge evaluations + "{}:/config".format(config_dir), + "-t", + "{}".format(submission_image), + "/bin/bash", + "-c", + "python eval.py --episode-config /config/{} --agent-config agent.yaml".format( + episode_config_file + ), + ], + ), + submission_name, + ) + + +def kill_docker_container(container_name): + """ Kill docker container by name. """ + subprocess.call( + ["docker", "rm", "--force", f"{container_name}"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + +def get_docker_logs(container_name): + """ Get logs of docker container. 
""" + p = subprocess.run( + ["docker", "logs", container_name], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + return p.stdout.decode(), "" # TODO parse output to stdout and stderr + + +def docker_cp(container_name, container_dir, host_dir): + subprocess.run(["docker", "cp", f"{container_name}:{container_dir}", host_dir]) + + +def write_logs(fname, data): + with open(fname, "w") as f: + f.write(data) + + +def extract_metrics(process, submission_stdout): + if process.returncode == 0: + results = submission_stdout.split("----- Per Episode Score -----")[-1] + metrics = summarize(ast.literal_eval(results)) + else: # Agent failure returns -1 weighted score + metrics = { + "Recall": 0, + "Precision": 0, + "Collisions": 0, + "Actions": 0, + "Weighted Total": -1, + } + return metrics + + +def log_perception_data(container_name, logdir): + perception_stdout, perception_stderr = get_docker_logs(container_name) + docker_cp( + container_name, DOCKER_VIO_LOGDIR, f"{logdir}/{HOST_VIO_LOGDIR}", + ) + + write_logs(f"{logdir}/perception-stdout.txt", perception_stdout) + write_logs(f"{logdir}/perception-stderr.txt", perception_stderr) + + +def main( + simulator: str, # path to simulator + kimera: bool, # specify whether to run kimera docker + image: str, # path to docker image + config: str, # path to configuration, + logdir: str = None, # optional log dir +): + + try: + if logdir is None: + logdir = f"logs/{datetime.now().strftime('%m-%d-%Y_%H-%M-%s')}" + if not os.path.exists(logdir): + os.makedirs(logdir) + + # run simulator + sim = subprocess.Popen([simulator, "--set_resolution", "320", "240"]) + + # run perception server + if kimera: + perception, perception_container_name = start_perception_subprocess( + "goseek-kimera" + ) + time.sleep(20) + + time.sleep(10) # wait for the simulator to open + + print("Running agent...") + + # Split up configuration + dirname, filename = os.path.split(os.path.abspath(config)) + p, submission_name = start_submission_subprocess(dirname, image, filename) + p.communicate() # wait for process to stop + + except Exception as ex: + print(f"CAUGHT EXCEPTION: {ex}") + + except KeyboardInterrupt: + print(f"Caught keyboard interrupt, exiting...") + + finally: + # cleanup + sim.terminate() + print("stopped simulator...") + + stdoutdata, stderrdata = get_docker_logs(submission_name) + metrics = extract_metrics(p, stdoutdata) + write_logs(f"{logdir}/submission-stdout.txt", stdoutdata) + + kill_docker_container(submission_name) + print("stopped submission...") + + if kimera: + log_perception_data(perception_container_name, logdir) + kill_docker_container(perception_container_name) + perception.terminate() + print("stopped perception pipeline...") + + return stdoutdata, stderrdata, metrics, p.returncode + if __name__ == "__main__": parser = argparse.ArgumentParser() @@ -45,42 +258,18 @@ help="Use realistic perception for observations", action="store_true", ) - args = parser.parse_args() - # ---- perception mode - if args.perception: - raise NotImplementedError( - "This perception mode will be released in an update to the goseek-challenge soon. Please stay tuned!" 
- ) - - # ---- groundtruth mode - - # run simulator - sim = subprocess.Popen([args.simulator, "--set_resolution", "320", "240"]) - time.sleep(10) # wait for the simulator to open - - output = subprocess.check_output( - [ - "docker", - "run", - "--rm", - "--network=host", - "--gpus", - "all", - "-v" - # note to goseek-challenge participants: we will use an internal config for our challenge evaluations - "{}/config:/config".format(os.getcwd()), - "-t", - "{}".format(args.image), - "/bin/bash", - "-c", - "python eval.py --episode-config /config/ground-truth.yaml --agent-config agent.yaml", - ] - ) - print(output.decode("utf-8")) + episode_config = "{}/config/perception-pipeline.yaml".format(os.getcwd()) + kimera = True + else: # ground truth mode + episode_config = "{}/config/ground-truth.yaml".format(os.getcwd()) + kimera = False - # cleanup - sim.terminate() + stdoutdata, stderrdata, metrics, returncode = main( + args.simulator, kimera, args.image, episode_config + ) + print("\n***** Summary Metrics *****") + pprint.pprint(metrics)