From 370ce1baa3cd1576153116feb74b38b96622f3a6 Mon Sep 17 00:00:00 2001
From: Misha
Date: Wed, 20 Sep 2023 09:37:43 +0000
Subject: [PATCH 01/11] make proper python package

---
 .gitignore | 160 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 setup.py   |   8 +++
 2 files changed, 168 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 setup.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6769e21
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,160 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
\ No newline at end of file
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..0d3f5cb
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,8 @@
+from setuptools import setup
+
+setup(name='doppelgangers',
+      version='0.1',
+      description='Doppelgangers: Learning to Disambiguate Images of Similar Structures',
+      author='Ruojin Cai',
+      packages=['doppelgangers'],
+      )
\ No newline at end of file

From b38c26a25ee5166337980f239044e4d9b0b4e338 Mon Sep 17 00:00:00 2001
From: Misha
Date: Wed, 20 Sep 2023 09:38:09 +0000
Subject: [PATCH 02/11] read data of hloc

---
 doppelgangers/datasets/hloc_dataset.py | 102 +++++++++++++++++++++++++
 1 file changed, 102 insertions(+)
 create mode 100644 doppelgangers/datasets/hloc_dataset.py

diff --git a/doppelgangers/datasets/hloc_dataset.py b/doppelgangers/datasets/hloc_dataset.py
new file mode 100644
index 0000000..1906f45
--- /dev/null
+++ b/doppelgangers/datasets/hloc_dataset.py
@@ -0,0 +1,102 @@
+import os.path as osp
+import numpy as np
+import torch
+from torch.utils.data import Dataset
+import cv2
+from ..utils.dataset import read_loftr_matches
+
+
+class HlocDoppelgangersDataset(Dataset):
+    def __init__(self,
+                 image_dir,
+                 matches_file,
+                 features_file,
+                 pair_path,
+                 img_size,
+                 **kwargs):
+        """
+        Doppelgangers test dataset: loads images and hloc matches for the Doppelgangers model.
+
+        Args:
+            image_dir (str): root directory for images.
+            matches_file (h5py.File): open hloc matches HDF5 file; image pairs are taken from its keys.
+            features_file (h5py.File): open hloc features HDF5 file with keypoints per image.
+            pair_path (str): path to the sfm pairs list (kept for interface compatibility).
+            img_size (int): the longer edge of resized images. 640 is recommended.
+                This is useful during training with batches and testing with memory intensive algorithms.
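
For reference, a minimal sketch of the HDF5 layout that `hloc_dataset.py` above expects: pairs nested as `matches_f[name1][name2]` with `matches` and `scores` datasets, plus per-image `keypoints` in the features file. File and image names here are illustrative, not part of the patch.

```python
import h5py
import numpy as np

# illustrative files; the real ones come from the hloc fork's extraction and matching
with h5py.File("matches.h5", "w") as matches_f:
    pair = matches_f.create_group("img_0001.jpg").create_group("img_0002.jpg")
    pair.create_dataset("matches", data=np.array([[0, 3], [1, 7]]))  # keypoint index pairs
    pair.create_dataset("scores", data=np.array([0.95, 0.42]))       # match confidences

with h5py.File("features.h5", "w") as features_f:
    for name, n_kpts in [("img_0001.jpg", 4), ("img_0002.jpg", 8)]:
        grp = features_f.create_group(name)
        grp.create_dataset("keypoints", data=np.random.rand(n_kpts, 2) * 640)  # (x, y) pixels
```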
+        """
+        super().__init__()
+        self.image_dir = image_dir
+        self.matches_f = matches_file
+        self.features_f = features_file
+        self.pairs_info = []
+        for i1 in matches_file.keys():
+            for i2 in matches_file[i1].keys():
+                self.pairs_info.append((i1, i2))
+
+        self.img_size = img_size
+
+    def __len__(self):
+        return len(self.pairs_info)
+
+    def __getitem__(self, idx):
+        image_1_name, image_2_name = self.pairs_info[idx]
+
+        features1 = self.features_f[image_1_name]
+        features2 = self.features_f[image_2_name]
+        keypoints1 = np.array(features1['keypoints'])
+        keypoints2 = np.array(features2['keypoints'])
+        matches_data = self.matches_f[image_1_name][image_2_name]
+        matches = np.array(matches_data['matches'])
+        conf = np.array(matches_data['scores'])
+        keypoints1 = keypoints1[matches[..., 0]].astype(np.int32)
+        keypoints2 = keypoints2[matches[..., 1]].astype(np.int32)
+
+        if np.sum(conf > 0.8) == 0:
+            matches = None
+        else:
+            fmat, mask = cv2.findFundamentalMat(keypoints1[conf > 0.8], keypoints2[conf > 0.8], cv2.FM_RANSAC, 3, 0.99)
+            if mask is None or fmat is None:
+                matches = None
+            else:
+                # keep the indices of confident matches that survive RANSAC
+                matches = (np.ones((keypoints1.shape[0], 2)) * np.arange(keypoints1.shape[0]).reshape(-1, 1)).astype(int)[conf > 0.8][mask.ravel() == 1]
+
+        img_name1 = osp.join(self.image_dir, image_1_name)
+        img_name2 = osp.join(self.image_dir, image_2_name)
+
+        image = read_loftr_matches(img_name1, img_name2, self.img_size, 8, True, keypoints1, keypoints2, matches, warp=True, conf=conf)
+
+        return {
+            'image': image,
+            'image1_name': image_1_name,
+            'image2_name': image_2_name,
+        }
+
+
+def get_datasets(cfg):
+    te_dataset = HlocDoppelgangersDataset(
+        cfg.image_dir,
+        cfg.matches_file,
+        cfg.features_file,
+        cfg.test.pair_path,
+        img_size=getattr(cfg.test, "img_size", 640))
+
+    return te_dataset
+
+
+def init_np_seed(worker_id):
+    seed = torch.initial_seed()
+    np.random.seed(seed % 4294967296)
+
+
+def get_data_loaders(cfg):
+    te_dataset = get_datasets(cfg)
+    test_loader = torch.utils.data.DataLoader(
+        dataset=te_dataset, batch_size=cfg.test.batch_size,
+        shuffle=False, num_workers=cfg.num_workers, drop_last=False,
+        worker_init_fn=init_np_seed)
+
+    loaders = {
+        "test_loader": test_loader,
+    }
+    return loaders

From 33bdb70947fa354a074f46abda8e5af47b6418c7 Mon Sep 17 00:00:00 2001
From: Misha
Date: Wed, 20 Sep 2023 09:39:48 +0000
Subject: [PATCH 03/11] inference on hloc

---
 inference_on_hloc.py | 83 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)
 create mode 100644 inference_on_hloc.py

diff --git a/inference_on_hloc.py b/inference_on_hloc.py
new file mode 100644
index 0000000..ed36f43
--- /dev/null
+++ b/inference_on_hloc.py
@@ -0,0 +1,83 @@
+from types import SimpleNamespace
+
+from tqdm import tqdm
+from doppelgangers.models.cnn_classifier import decoder
+from doppelgangers.datasets.hloc_dataset import HlocDoppelgangersDataset
+import torch
+import numpy as np
+from torch.utils.data import DataLoader
+import copy
+import h5py
+import argparse
+
+
+def get_args():
+    # command line args
+    parser = argparse.ArgumentParser(description='Structure from Motion disambiguation with Doppelgangers classification model.')
+
+    # classifier and hloc paths
+    parser.add_argument('--weights_path', type=str,
+                        help="path to classifier weights")
+    parser.add_argument('--features_path', type=str,
+                        help="path to hloc features HDF5 file")
+    parser.add_argument('--matches_path', type=str,
+                        help="path to hloc matches HDF5 file")
+    parser.add_argument('--filtered_path', type=str,
+                        help="path to output filtered pairs list")
+    parser.add_argument('--image_dir', type=str,
+                        help="path to where images are stored")
+    parser.add_argument('--pairs_path', type=str,
+                        help="path to sfm pairs file (pairs-sfm.txt)")
+    parser.add_argument('--batch_size', type=int, default=16,
+                        help="batch size")
+
+    args = parser.parse_args()
+    return args
+
+
+args = get_args()
+weights_path = args.weights_path
+features_file = args.features_path
+matches_file = args.matches_path
+sfm_filtered = args.filtered_path
+image_dir = args.image_dir
+pair_path = args.pairs_path
+batch_size = args.batch_size
+
+model = decoder(cfg=SimpleNamespace(input_dim=10))
+ckpt = torch.load(weights_path)
+new_ckpt = copy.deepcopy(ckpt['dec'])
+
+# strip the 'module.' prefix left behind by DataParallel checkpoints
+for key in ckpt['dec'].keys():
+    if key.startswith('module.'):
+        new_ckpt[key[len('module.'):]] = new_ckpt.pop(key)
+
+model.load_state_dict(new_ckpt, strict=True)
+model = model.cuda().eval()
+
+# example paths:
+# weights_path = '/files/weights/doppelgangers_classifier_loftr.pt'
+# features_file = '/outputs/features.h5'
+# matches_file = '/outputs/matches.h5'
+# sfm_filtered = '/outputs/pairs-sfm-filtered.txt'
+# image_dir = '/images'
+# pair_path = '/pairs-sfm.txt'
+
+with h5py.File(features_file, 'r') as features_f, h5py.File(matches_file, 'r') as matches_f, open(sfm_filtered, 'w') as filtered_f:
+    test_loader = DataLoader(
+        dataset=HlocDoppelgangersDataset(
+            img_size=640,
+            image_dir=image_dir,
+            pair_path=pair_path,
+            features_file=features_f,
+            matches_file=matches_f
+        ),
+        batch_size=batch_size,
+        shuffle=False, num_workers=8, drop_last=False)
+
+    for b in tqdm(test_loader):
+        with torch.no_grad():
+            scores = model(b['image'].cuda()).detach().cpu().numpy()
+        for i1, i2, score in zip(b['image1_name'], b['image2_name'], scores):
+            # argmax over the two classes: 0 = doppelganger pair, 1 = keep
+            score_argmax = np.argmax(score)
+            filtered_f.write(f"{i1} {i2} {score_argmax}\n")
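
For reference, the script added above is driven from the command line; a hypothetical invocation, with all paths purely illustrative:

```python
# python inference_on_hloc.py \
#     --weights_path weights/doppelgangers_classifier_loftr.pt \
#     --features_path outputs/features.h5 \
#     --matches_path outputs/matches.h5 \
#     --filtered_path outputs/pairs-sfm-filtered.txt \
#     --image_dir images \
#     --pairs_path outputs/pairs-sfm.txt \
#     --batch_size 16
```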

From 25261cb8963203e4195b9a866749ba633a9e98dd Mon Sep 17 00:00:00 2001
From: Misha
Date: Wed, 20 Sep 2023 09:54:43 +0000
Subject: [PATCH 04/11] move inference inside package

---
 doppelgangers/utils/inference_on_hloc.py | 105 +++++++++++++++++++++++
 inference_on_hloc.py                     |  83 ------------------
 2 files changed, 105 insertions(+), 83 deletions(-)
 create mode 100644 doppelgangers/utils/inference_on_hloc.py
 delete mode 100644 inference_on_hloc.py

diff --git a/doppelgangers/utils/inference_on_hloc.py b/doppelgangers/utils/inference_on_hloc.py
new file mode 100644
index 0000000..04bdad1
--- /dev/null
+++ b/doppelgangers/utils/inference_on_hloc.py
@@ -0,0 +1,105 @@
+from types import SimpleNamespace
+
+from tqdm import tqdm
+from doppelgangers.models.cnn_classifier import decoder
+from doppelgangers.datasets.hloc_dataset import HlocDoppelgangersDataset
+import torch
+import numpy as np
+from torch.utils.data import DataLoader
+import copy
+import h5py
+import argparse
+
+
+def get_args():
+    # command line args
+    parser = argparse.ArgumentParser(description='Structure from Motion disambiguation with Doppelgangers classification model.')
+
+    # classifier and hloc paths
+    parser.add_argument('--weights_path', type=str,
+                        help="path to classifier weights")
+    parser.add_argument('--features_path', type=str,
+                        help="path to hloc features HDF5 file")
+    parser.add_argument('--matches_path', type=str,
+                        help="path to hloc matches HDF5 file")
+    parser.add_argument('--filtered_path', type=str,
+                        help="path to output filtered pairs list")
+    parser.add_argument('--image_dir', type=str,
+                        help="path to where images are stored")
+    parser.add_argument('--pairs_path', type=str,
+                        help="path to sfm pairs file (pairs-sfm.txt)")
+    parser.add_argument('--batch_size', type=int, default=16,
+                        help="batch size")
+
+    args = parser.parse_args()
+    return args
+
+
+def main(
+    weights_path,
+    features_file,
+    matches_file,
+    sfm_filtered,
+    image_dir,
+    pair_path,
+    batch_size,
+):
+    model = decoder(cfg=SimpleNamespace(input_dim=10))
+    ckpt = torch.load(weights_path)
+    new_ckpt = copy.deepcopy(ckpt['dec'])
+
+    # strip the 'module.' prefix left behind by DataParallel checkpoints
+    for key in ckpt['dec'].keys():
+        if key.startswith('module.'):
+            new_ckpt[key[len('module.'):]] = new_ckpt.pop(key)
+
+    model.load_state_dict(new_ckpt, strict=True)
+    model = model.cuda().eval()
+
+    # example paths:
+    # weights_path = '/files/weights/doppelgangers_classifier_loftr.pt'
+    # features_file = '/outputs/features.h5'
+    # matches_file = '/outputs/matches.h5'
+    # sfm_filtered = '/outputs/pairs-sfm-filtered.txt'
+    # image_dir = '/images'
+    # pair_path = '/pairs-sfm.txt'
+
+    with h5py.File(features_file, 'r') as features_f, h5py.File(matches_file, 'r') as matches_f, open(sfm_filtered, 'w') as filtered_f:
+        test_loader = DataLoader(
+            dataset=HlocDoppelgangersDataset(
+                img_size=640,
+                image_dir=image_dir,
+                pair_path=pair_path,
+                features_file=features_f,
+                matches_file=matches_f
+            ),
+            batch_size=batch_size,
+            shuffle=False, num_workers=8, drop_last=False)
+
+        for b in tqdm(test_loader):
+            with torch.no_grad():
+                scores = model(b['image'].cuda()).detach().cpu().numpy()
+            for i1, i2, score in zip(b['image1_name'], b['image2_name'], scores):
+                # argmax over the two classes: 0 = doppelganger pair, 1 = keep
+                score_argmax = np.argmax(score)
+                filtered_f.write(f"{i1} {i2} {score_argmax}\n")
+
+
+if __name__ == "__main__":
+    args = get_args()
+
+    main(
+        weights_path=args.weights_path,
+        features_file=args.features_path,
+        matches_file=args.matches_path,
+        sfm_filtered=args.filtered_path,
+        image_dir=args.image_dir,
+        pair_path=args.pairs_path,
+        batch_size=args.batch_size,
+    )
diff --git a/inference_on_hloc.py b/inference_on_hloc.py
deleted file mode 100644
index ed36f43..0000000
--- a/inference_on_hloc.py
+++ /dev/null
@@ -1,83 +0,0 @@
-from types import SimpleNamespace
-
-from tqdm import tqdm
-from doppelgangers.models.cnn_classifier import decoder
-from doppelgangers.datasets.hloc_dataset import HlocDoppelgangersDataset
-import torch
-import numpy as np
-from torch.utils.data import DataLoader
-import copy
-import h5py
-import argparse
-
-
-def get_args():
-    # command line args
-    parser = argparse.ArgumentParser(description='Structure from Motion disambiguation with Doppelgangers classification model.')
-
-    # classifier and hloc paths
-    parser.add_argument('--weights_path', type=str,
-                        help="path to classifier weights")
-    parser.add_argument('--features_path', type=str,
-                        help="path to hloc features HDF5 file")
-    parser.add_argument('--matches_path', type=str,
-                        help="path to hloc matches HDF5 file")
-    parser.add_argument('--filtered_path', type=str,
-                        help="path to output filtered pairs list")
-    parser.add_argument('--image_dir', type=str,
-                        help="path to where images are stored")
-    parser.add_argument('--pairs_path', type=str,
-                        help="path to sfm pairs file (pairs-sfm.txt)")
-    parser.add_argument('--batch_size', type=int, default=16,
-                        help="batch size")
-
-    args = parser.parse_args()
-    return args
-
-
-args = get_args()
-weights_path = args.weights_path
-features_file = args.features_path
-matches_file = args.matches_path
-sfm_filtered = args.filtered_path
-image_dir = args.image_dir
-pair_path = args.pairs_path
-batch_size = args.batch_size
-
-model = decoder(cfg=SimpleNamespace(input_dim=10))
-ckpt = torch.load(weights_path)
-new_ckpt = copy.deepcopy(ckpt['dec'])
-
-# strip the 'module.' prefix left behind by DataParallel checkpoints
-for key in ckpt['dec'].keys():
-    if key.startswith('module.'):
-        new_ckpt[key[len('module.'):]] = new_ckpt.pop(key)
-
-model.load_state_dict(new_ckpt, strict=True)
-model = model.cuda().eval()
-
-# example paths:
-# weights_path = '/files/weights/doppelgangers_classifier_loftr.pt'
-# features_file = '/outputs/features.h5'
-# matches_file = '/outputs/matches.h5'
-# sfm_filtered = '/outputs/pairs-sfm-filtered.txt'
-# image_dir = '/images'
-# pair_path = '/pairs-sfm.txt'
-
-with h5py.File(features_file, 'r') as features_f, h5py.File(matches_file, 'r') as matches_f, open(sfm_filtered, 'w') as filtered_f:
-    test_loader = DataLoader(
-        dataset=HlocDoppelgangersDataset(
-            img_size=640,
-            image_dir=image_dir,
-            pair_path=pair_path,
-            features_file=features_f,
-            matches_file=matches_f
-        ),
-        batch_size=batch_size,
-        shuffle=False, num_workers=8, drop_last=False)
-
-    for b in tqdm(test_loader):
-        with torch.no_grad():
-            scores = model(b['image'].cuda()).detach().cpu().numpy()
-        for i1, i2, score in zip(b['image1_name'], b['image2_name'], scores):
-            # argmax over the two classes: 0 = doppelganger pair, 1 = keep
-            score_argmax = np.argmax(score)
-            filtered_f.write(f"{i1} {i2} {score_argmax}\n")
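
With the packaged entry point from PATCH 04, the same inference can also be driven programmatically; a minimal sketch using the patch's own `main` signature, with illustrative paths:

```python
from doppelgangers.utils.inference_on_hloc import main

main(
    weights_path="weights/doppelgangers_classifier_loftr.pt",  # illustrative paths
    features_file="outputs/features.h5",
    matches_file="outputs/matches.h5",
    sfm_filtered="outputs/pairs-sfm-filtered.txt",
    image_dir="images",
    pair_path="outputs/pairs-sfm.txt",
    batch_size=16,
)
```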

From dd927e28fcf1b2b6c072383f131c64c91bbab27a Mon Sep 17 00:00:00 2001
From: Misha
Date: Wed, 20 Sep 2023 10:02:53 +0000
Subject: [PATCH 05/11] overwrite hloc database

---
 doppelgangers/utils/overwrite_hloc.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
 create mode 100644 doppelgangers/utils/overwrite_hloc.py

diff --git a/doppelgangers/utils/overwrite_hloc.py b/doppelgangers/utils/overwrite_hloc.py
new file mode 100644
index 0000000..2474c48
--- /dev/null
+++ b/doppelgangers/utils/overwrite_hloc.py
@@ -0,0 +1,21 @@
+import h5py
+from tqdm import tqdm
+
+# example paths:
+# sfm_filtered = '/outputs/pairs-sfm-filtered.txt'
+# pair_path = '/outputs/pairs-sfm.txt'
+# matches_file = '/outputs/matches.h5'
+
+def main(sfm_filtered, pair_path, matches_file):
+    with open(sfm_filtered) as filtered_f, open(pair_path, 'w') as orig_f, h5py.File(matches_file, 'r+') as matches_f:
+        total = 0
+        filtered = 0
+        for line in tqdm(filtered_f):
+            total += 1
+            i1, i2, res = line.split()
+            res = int(res)
+            if not res:  # classifier flagged this pair as doppelgangers: drop its matches
+                del matches_f[i1][i2]
+                filtered += 1
+            else:
+                orig_f.write(f"{i1} {i2}\n")
+    print(f"Doppelgangers filtered {filtered / total * 100:.2f}% of pairs")
\ No newline at end of file
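
The filtered pairs file consumed above holds one `<image1> <image2> <0|1>` line per pair; `main` rewrites the pairs list to keep only pairs labeled 1 and deletes the matches of pairs labeled 0 from the HDF5 database in place. A sketch with illustrative paths:

```python
from doppelgangers.utils.overwrite_hloc import main

main(
    sfm_filtered="outputs/pairs-sfm-filtered.txt",  # illustrative paths
    pair_path="outputs/pairs-sfm.txt",
    matches_file="outputs/matches.h5",
)
```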

From 06cdbc5ec0a0ab1fa085b71d7736a4b811fc76b9 Mon Sep 17 00:00:00 2001
From: Misha
Date: Wed, 20 Sep 2023 10:20:11 +0000
Subject: [PATCH 06/11] readme update

---
 README.md | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 79 insertions(+)

diff --git a/README.md b/README.md
index a5abcce..18a481c 100644
--- a/README.md
+++ b/README.md
@@ -136,6 +136,85 @@ python train.py doppelgangers/configs/training_configs/doppelgangers_classifier_
 python train_multi_gpu.py doppelgangers/configs/training_configs/doppelgangers_classifier_flip.yaml
 ```
 
+## Usage with [HLOC](https://github.com/cvg/Hierarchical-Localization)
+
+**You will need some additional data in your matches.h5: each image pair stored as `matches[name1][name2]` with `matches` and `scores` datasets**
+
+**I have created [this fork of HLOC](), which I hope will later be merged**
+
+```python
+sfm_pairs = outputs / "pairs-sfm.txt"
+sfm_pairs_filtered = outputs / "pairs-sfm-filtered.txt"
+features = outputs / "features.h5"
+vlad_features = outputs / "vlad.h5"
+matches = outputs / "matches.h5"
+
+ref_dir = outputs / "ref"
+dense_conf_json = outputs / "dense_conf.json"
+
+feature_conf = extract_features.confs["superpoint_aachen"]
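
The README snippet in PATCH 06 assumes hloc and pixsfm are importable and that `images`, `outputs`, and `references` are already defined; one way to set that up (a sketch under those assumptions, not part of the patch):

```python
from pathlib import Path

from hloc import extract_features, match_features, pairs_from_retrieval
from pixsfm.refine_hloc import PixSfM

images = Path("images")    # directory with the input photos
outputs = Path("outputs")  # working directory for hloc artifacts
references = [p.relative_to(images).as_posix() for p in images.iterdir()]
```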
+matcher_conf = match_features.confs["superpoint+lightglue"]
+
+# Global features for retrieval
+extract_features.main(
+    extract_features.confs["netvlad"],
+    images,
+    outputs,
+    image_list=references,
+    feature_path=vlad_features,
+)
+
+# Local sparse features
+extract_features.main(
+    feature_conf, images,
+    image_list=references,
+    feature_path=features
+)
+
+pairs_from_retrieval.main(
+    descriptors=vlad_features,
+    output=sfm_pairs,
+    num_matched=16,
+    query_list=references,
+)
+
+# Matching features
+match_features.main(
+    matcher_conf,
+    sfm_pairs,
+    features=features,
+    matches=matches
+)
+
+# Doppelgangers: score each pair and write the filtered pair list
+doppelganger_removal.main(
+    weights_path='...',
+    features_file=features,
+    matches_file=matches,
+    sfm_filtered=sfm_pairs_filtered,
+    image_dir=images,
+    pair_path=sfm_pairs,
+    batch_size=16
+)
+
+# Prune rejected pairs from pairs-sfm.txt and matches.h5
+doppelganger_overwrite.main(
+    sfm_filtered=sfm_pairs_filtered,
+    pair_path=sfm_pairs,
+    matches_file=matches
+)
+
+# SfM
+sfm = PixSfM(conf="low_memory")
+sfm.reconstruction(
+    ref_dir,
+    images,
+    sfm_pairs,
+    features,
+    matches,
+    image_list=references,
+)
+```
 
 ## Citation
 ```
 @inproceedings{cai2023doppelgangers,

From 6acb0c5559b709268b75d3a2066f6a1abd6f3617 Mon Sep 17 00:00:00 2001
From: Misha
Date: Wed, 20 Sep 2023 10:32:19 +0000
Subject: [PATCH 07/11] add import

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 7fec9d1..4798653 100644
--- a/README.md
+++ b/README.md
@@ -143,6 +143,8 @@ python train_multi_gpu.py doppelgangers/configs/training_configs/doppelgangers_c
 **I have created [this fork of HLOC](), which I hope will later be merged**
 
 ```python
+from doppelgangers.utils import inference_on_hloc as doppelganger_removal
+from doppelgangers.utils import overwrite_hloc as doppelganger_overwrite
 sfm_pairs = outputs / "pairs-sfm.txt"

From 9b7da7b1575849f636aba34811d3b96e578e6a7f Mon Sep 17 00:00:00 2001
From: Mikhail Scherbina <42784580+awarebayes@users.noreply.github.com>
Date: Wed, 20 Sep 2023 18:40:13 +0300
Subject: [PATCH 08/11] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 4798653..bd28fb7 100644
--- a/README.md
+++ b/README.md
@@ -140,7 +140,7 @@ python train_multi_gpu.py doppelgangers/configs/training_configs/doppelgangers_c
 
 **You will need some additional data in your matches.h5: each image pair stored as `matches[name1][name2]` with `matches` and `scores` datasets**
 
-**I have created [this fork of HLOC](), which I hope will later be merged**
+**I have created [this fork of HLOC](git@github.com:awarebayes/Hierarchical-Localization.git), which I hope will later be merged**
 
 ```python
 from doppelgangers.utils import inference_on_hloc as doppelganger_removal

From 185762e1877951789e7724a976e637af18b332a5 Mon Sep 17 00:00:00 2001
From: Mikhail Scherbina <42784580+awarebayes@users.noreply.github.com>
Date: Wed, 20 Sep 2023 18:41:01 +0300
Subject: [PATCH 09/11] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index bd28fb7..fc41a4a 100644
--- a/README.md
+++ b/README.md
@@ -140,7 +140,7 @@ python train_multi_gpu.py doppelgangers/configs/training_configs/doppelgangers_c
 
 **You will need some additional data in your matches.h5: each image pair stored as `matches[name1][name2]` with `matches` and `scores` datasets**
 
-**I have created [this fork of HLOC](git@github.com:awarebayes/Hierarchical-Localization.git), which I hope will later be merged**
+**I have created [this fork of HLOC](https://github.com/awarebayes/doppelgangers-hloc.git), which I hope will later be merged**
 
 ```python
 from doppelgangers.utils import inference_on_hloc as doppelganger_removal

From feb7e9c57df5b6a66242f72ca8ad37593b40ddd7 Mon Sep 17 00:00:00 2001
From: Mikhail Scherbina <42784580+awarebayes@users.noreply.github.com>
Date: Wed, 20 Sep 2023 18:41:39 +0300
Subject: [PATCH 10/11] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index fc41a4a..4d79033 100644
--- a/README.md
+++ b/README.md
@@ -140,7 +140,7 @@ python train_multi_gpu.py doppelgangers/configs/training_configs/doppelgangers_c
 
 **You will need some additional data in your matches.h5: each image pair stored as `matches[name1][name2]` with `matches` and `scores` datasets**
 
-**I have created [this fork of HLOC](https://github.com/awarebayes/doppelgangers-hloc.git), which I hope will later be merged**
+**I have created [this fork of HLOC](https://github.com/awarebayes/Hierarchical-Localization), which I hope will later be merged**
 
 ```python
 from doppelgangers.utils import inference_on_hloc as doppelganger_removal

From 2ad06bb5ad355a9abb0af8621c0a41421c2c7e07 Mon Sep 17 00:00:00 2001
From: Mikhail Scherbina <42784580+awarebayes@users.noreply.github.com>
Date: Wed, 20 Sep 2023 18:42:26 +0300
Subject: [PATCH 11/11] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 4d79033..707f7ad 100644
--- a/README.md
+++ b/README.md
@@ -140,7 +140,7 @@ python train_multi_gpu.py doppelgangers/configs/training_configs/doppelgangers_c
 
 **You will need some additional data in your matches.h5: each image pair stored as `matches[name1][name2]` with `matches` and `scores` datasets**
 
-**I have created [this fork of HLOC](https://github.com/awarebayes/Hierarchical-Localization), which I hope will later be merged**
+**I have created [this fork of HLOC](https://github.com/awarebayes/Hierarchical-Localization/tree/doppelgangers-integration-1), which I hope will later be merged**
 
 ```python
 from doppelgangers.utils import inference_on_hloc as doppelganger_removal
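
After the series is applied and the pipeline has run, a quick sanity check that the doppelganger filtering actually pruned the matches database (paths illustrative):

```python
import h5py

with h5py.File("outputs/matches.h5", "r") as matches_f:
    # count the pairs that survived, using the same nested layout as the dataset
    n_pairs = sum(len(matches_f[i1].keys()) for i1 in matches_f.keys())

print(f"{n_pairs} image pairs remain after doppelganger filtering")
```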