Commit 281d05c (root commit): first commit

lartpang committed Dec 2, 2024
Showing 21 changed files with 1,949 additions and 0 deletions.
164 changes: 164 additions & 0 deletions .gitignore
@@ -0,0 +1,164 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
.vscode/
segment_anything/
segment_anything2/
output/
36 changes: 36 additions & 0 deletions README.md
@@ -0,0 +1,36 @@
# Evaluate SAM and SAM 2 with Diverse Prompts Towards Context-Dependent Concepts under Different Scenes

## Usage

### Prepare Datasets

See the arXiv version of our paper for dataset details, and set the dataset paths in the config file ([`config.yaml`](./config.yaml)).

> [!NOTE]
> - Some datasets must be preprocessed before use with the scripts in the [`preprocess`](./preprocess/) folder.
> - Images in some datasets (e.g., CAD for video COD) may lack corresponding annotations; such images are not used for model prediction or performance evaluation, so please remove them before running the scripts (a minimal cleanup sketch follows this note).
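
A minimal cleanup sketch for the second point, assuming images and masks sit in parallel directories with matching file stems; the paths below mirror the CAD entries in `config.yaml` and are illustrative, so adjust them and the suffixes to your local layout:

```python
from pathlib import Path

# Illustrative paths, modeled on the CAD layout in config.yaml.
image_dir = Path("VideoDataset/CamouflagedAnimalDataset/original_data/frames")
mask_dir = Path("VideoDataset/CamouflagedAnimalDataset/converted_mask/groundtruth")

for image_path in sorted(image_dir.glob("*.png")):
    mask_path = mask_dir / (image_path.stem + ".png")
    if not mask_path.exists():
        # Frames without annotations cannot be evaluated, so drop them.
        image_path.unlink()
        print(f"removed unannotated frame: {image_path}")
```
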
### Prepare SAM and SAM 2

1. Install SAM:
   1. `git clone https://github.com/facebookresearch/segment-anything.git`
   2. `cd segment-anything`
   3. `pip install -e .`
2. Install SAM 2:
   1. `git clone https://github.com/facebookresearch/sam2.git`
   2. `cd sam2`
   3. `pip install -e .`
3. Download the SAM and SAM 2 checkpoints and assign their paths to the `sam-l` and `sam2-l` items of the config file (a loading sketch follows this list):
   1. `vit_l` checkpoint from <https://github.com/facebookresearch/segment-anything?tab=readme-ov-file#model-checkpoints>:
      1. url: <https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth>
   2. `hiera_large` checkpoint from <https://github.com/facebookresearch/sam2?tab=readme-ov-file#sam-2-checkpoints>:
      1. url: <https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_large.pt>
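
With both packages installed, the checkpoints can be loaded roughly as below. This is a sketch built on the two libraries' public predictor APIs; the SAM 2 model config name (`sam2_hiera_l.yaml`) and the device handling are assumptions to adapt to your installed versions:

```python
import torch
from segment_anything import SamPredictor, sam_model_registry
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor

device = "cuda" if torch.cuda.is_available() else "cpu"

# SAM: the vit_l backbone paired with the sam_vit_l_0b3195.pth checkpoint.
sam = sam_model_registry["vit_l"](checkpoint="checkpoints/sam_vit_l_0b3195.pth")
sam_predictor = SamPredictor(sam.to(device))

# SAM 2: the hiera_large checkpoint; "sam2_hiera_l.yaml" is the matching model
# config shipped with the sam2 package (assumed, check your installed version).
sam2 = build_sam2("sam2_hiera_l.yaml", "checkpoints/sam2_hiera_large.pt", device=device)
sam2_predictor = SAM2ImagePredictor(sam2)
```
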

### Generate Predictions

Run the corresponding commands in [./run.sh](./run.sh) to generate predictions for each task.

## Evaluation Tools

- <https://github.com/Xiaoqi-Zhao-DLUT/PySegMetric_EvalToolkit>
- <https://github.com/zhaoyuan1209/PyADMetric_EvalToolkit>
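
Both toolkits compute standard segmentation metrics. As a minimal illustration of what such an evaluation reduces to, the sketch below computes MAE and IoU for one prediction/ground-truth pair (the file paths are placeholders):

```python
import cv2
import numpy as np

# Placeholder paths: one prediction and its ground-truth mask.
pred = cv2.imread("output/ECSSD/0001.png", cv2.IMREAD_GRAYSCALE) / 255.0
gt = cv2.imread("ImageDataset/ECSSD/ground_truth_mask/0001.png", cv2.IMREAD_GRAYSCALE) / 255.0

# MAE: mean absolute error over all pixels.
mae = np.abs(pred - gt).mean()

# IoU: intersection over union after thresholding at 0.5.
pred_bin, gt_bin = pred >= 0.5, gt >= 0.5
iou = (pred_bin & gt_bin).sum() / max((pred_bin | gt_bin).sum(), 1)

print(f"MAE: {mae:.4f}  IoU: {iou:.4f}")
```
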
63 changes: 63 additions & 0 deletions config.yaml
@@ -0,0 +1,63 @@
checkpoint:
  "sam-l": "checkpoints/sam_vit_l_0b3195.pth"
  "sam2-l": "checkpoints/sam2_hiera_large.pt"

dataset:
  # image sod
  "DUTS-TR":
    "image":
      "root": "ImageDataset/DUTS/DUTS-TR/DUTS-TR-Image"
      "suffix": ".jpg"
    "mask":
      "root": "ImageDataset/DUTS/DUTS-TR/DUTS-TR-Mask"
      "suffix": ".png"
  "DUTS-TE":
    "image":
      "root": "ImageDataset/DUTS/DUTS-TE/DUTS-TE-Image"
      "suffix": ".jpg"
    "mask":
      "root": "ImageDataset/DUTS/DUTS-TE/DUTS-TE-Mask"
      "suffix": ".png"
  "ECSSD":
    "image":
      "root": "ImageDataset/ECSSD/images"
      "suffix": ".jpg"
    "mask":
      "root": "ImageDataset/ECSSD/ground_truth_mask"
      "suffix": ".png"
  # image camouflaged object detection
  # image shadow detection
  # image transparent object segmentation
  # image industrial PBD
  # image industrial AD
  # image lesion object segmentation
  # video polyp segmentation
  # video salient object detection
  "DAVIS16-Val":
    "image":
      "root": "VideoDataset/DAVIS16-Val/JPEGImages/1080p"
      "subdir": ""
      "suffix": ".jpg"
    "mask":
      "root": "VideoDataset/DAVIS16-Val/Annotations/1080p"
      "subdir": ""
      "suffix": ".png"
  # video camouflaged object detection
  "MoCA-Mask-TE":
    "image":
      "root": "VideoDataset/MoCA-Mask/MoCA_Video/TestDataset_per_sq"
      "subdir": "Imgs"
      "suffix": ".jpg"
    "mask":
      "root": "VideoDataset/MoCA-Mask/MoCA_Video/TestDataset_per_sq"
      "subdir": "GT"
      "suffix": ".png"
  "CAD":
    "image":
      "root": "VideoDataset/CamouflagedAnimalDataset/original_data"
      "subdir": "frames"
      "suffix": ".png"
    "mask":
      "root": "VideoDataset/CamouflagedAnimalDataset/converted_mask"
      "subdir": "groundtruth"
      "suffix": ".png"
25 changes: 25 additions & 0 deletions preprocess/BTAD.py
@@ -0,0 +1,25 @@
import glob
import os
import shutil

import cv2
import numpy as np
import tqdm

root = "data/industral/BTAD/VTADL/btad/BTech_Dataset_transformed/"
save = "converted_data/btad"
os.makedirs(os.path.join(save, "images"), exist_ok=True)
os.makedirs(os.path.join(save, "gt"), exist_ok=True)

for p in tqdm.tqdm(glob.glob(f"{root}/*/test/*/*")):
    gt_path = p.replace("test", "ground_truth")
    if "/01/" in gt_path:
        # In subset 01 the masks are .png while the images are .bmp.
        gt_path = gt_path.replace(".bmp", ".png")
    p_list = p.split(os.path.sep)
    save_name = p_list[-4] + "_" + p_list[-2] + "_" + p_list[-1][:-4] + ".png"
    shutil.copy(p, os.path.join(save, "images", save_name))
    if "ok" in gt_path:
        # Defect-free ("ok") samples have no mask, so write an all-black one.
        H, W, _ = cv2.imread(p).shape
        cv2.imwrite(os.path.join(save, "gt", save_name), np.zeros((H, W), dtype=np.uint8))
    else:
        shutil.copy(gt_path, os.path.join(save, "gt", save_name))
67 changes: 67 additions & 0 deletions preprocess/BraTS2020.py
@@ -0,0 +1,67 @@
import glob
import os
import random

import cv2
import nibabel as nib
import numpy as np
import tqdm


def nii2pngs(nii_path):
    """Load a NIfTI volume and return its axial slices as uint8 images."""
    nii_data = nib.load(nii_path)
    img_data = nii_data.get_fdata()

    res = []
    for i in range(img_data.shape[2]):
        slice_data = img_data[:, :, i]
        # min-max normalize each slice to [0, 255]
        slice_data = (slice_data - np.min(slice_data)) / (np.max(slice_data) - np.min(slice_data))
        slice_data = (slice_data * 255).astype(np.uint8)
        res.append(slice_data)
    return res


root = "data/Brats2020/MICCAI_BraTS2020_TrainingData"
save_root = "converted_data/brats/brats_2020"

all_segs = glob.glob(f"{root}/*/*_seg.nii")
all_segs = random.sample(all_segs, k=37)
for seg_cls in ["WT", "ET", "TC"]:
    for moda in ["flair", "t1", "t1ce", "t2"]:
        save = save_root + "_" + moda + "_" + seg_cls

        paths = []
        for gt_path in tqdm.tqdm(all_segs):
            p = gt_path.replace("seg", moda)
            p_list = p.split(os.path.sep)
            save_name = p_list[-1]

            gts = nii2pngs(gt_path)
            gts = np.stack(gts)
            # Binarize the rescaled label volume for the requested sub-region:
            # after min-max rescaling, label 4 maps to 255 and label 1 to 63
            # (assuming label 4 is the slice maximum), so WT keeps all labels,
            # ET keeps label 4, and TC keeps labels 1 and 4.
            if seg_cls == "WT":
                gts = ((gts > 0) * 255).astype(np.uint8)
            elif seg_cls == "ET":
                gts = ((gts == 255) * 255).astype(np.uint8)
            elif seg_cls == "TC":
                gts[gts == 63] = 255
                gts = ((gts == 255) * 255).astype(np.uint8)

            images = nii2pngs(p)
            images = np.stack(images)
            # Split the volume at the slice with the largest tumor area and
            # export two clips ("former"/"latter") that both start from it.
            index = np.argmax(gts.sum((1, 2)), axis=0)
            for i, (gt, img) in enumerate(zip(gts[: index + 1][::-1], images[: index + 1][::-1])):
                if i == 0:
                    paths.append(os.path.join(save, "gt", save_name + "_former"))
                    os.makedirs(os.path.join(save, "gt", save_name + "_former"), exist_ok=True)
                    os.makedirs(os.path.join(save, "videos", save_name + "_former"), exist_ok=True)
                cv2.imwrite(os.path.join(save, "gt", save_name + "_former", f"{i:06d}.jpg"), gt)
                cv2.imwrite(os.path.join(save, "videos", save_name + "_former", f"{i:06d}.jpg"), img)

            for i, (gt, img) in enumerate(zip(gts[index:], images[index:])):
                os.makedirs(os.path.join(save, "gt", save_name + "_latter"), exist_ok=True)
                os.makedirs(os.path.join(save, "videos", save_name + "_latter"), exist_ok=True)
                cv2.imwrite(os.path.join(save, "gt", save_name + "_latter", f"{i:06d}.jpg"), gt)
                cv2.imwrite(os.path.join(save, "videos", save_name + "_latter", f"{i:06d}.jpg"), img)

        print(len(os.listdir(os.path.join(save, "videos"))))
21 changes: 21 additions & 0 deletions preprocess/DAVIS16.py
@@ -0,0 +1,21 @@
import os
import shutil

data_list = "F:/1-SemanticSegmentation/DAVIS-data/DAVIS/ImageSets/1080p/val.txt"
data_root = "F:/1-SemanticSegmentation/DAVIS-data/DAVIS"
save_root = "F:/1-SemanticSegmentation/DAVIS-data/DAVIS/DAVIS16-Val"

for line in open(data_list, mode="r", encoding="utf-8"):
    image_sub_path, mask_sub_path = line.strip().split()
    # Entries in val.txt start with "/", so drop it before joining.
    image_sub_path = image_sub_path[1:]
    mask_sub_path = mask_sub_path[1:]
    image_path = os.path.join(data_root, image_sub_path)
    mask_path = os.path.join(data_root, mask_sub_path)

    new_image_path = os.path.join(save_root, image_sub_path)
    os.makedirs(os.path.dirname(new_image_path), exist_ok=True)
    shutil.copy(image_path, new_image_path)

    new_mask_path = os.path.join(save_root, mask_sub_path)
    os.makedirs(os.path.dirname(new_mask_path), exist_ok=True)
    shutil.copy(mask_path, new_mask_path)