Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

adding support for reading .mat and .xml format annotations given by the UA-DETRAC challenge. #53

Merged
merged 6 commits into from
Oct 31, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Release.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
- [pip install lapsolver]
- pip install .
- pytest
- deactivate motmetrics-env
- deactivate
- conda env remove -n motmetrics-env
- git add, commit
- git flow release finish <VERSION>
Expand Down
2 changes: 1 addition & 1 deletion motmetrics/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,4 @@


# Needs to be last line
__version__ = '1.1.2'
__version__ = '1.1.3'
105 changes: 105 additions & 0 deletions motmetrics/apps/eval_detrac.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
"""py-motmetrics - metrics for multiple object tracker (MOT) benchmarking.
Christoph Heindl, 2017
https://github.com/cheind/py-motmetrics
Author: Urwa Muaz
"""

import argparse
import glob
import os
import logging
import motmetrics as mm
import pandas as pd
from collections import OrderedDict
from pathlib import Path

def parse_args():
    """Build and parse the command-line arguments for DETRAC evaluation.

    Returns
    -------
    argparse.Namespace
        Parsed options: groundtruths, tests, loglevel, gtfmt, tsfmt, solver.
    """
    # Fixed typo in user-facing help text: "reuired" -> "required".
    parser = argparse.ArgumentParser(description="""
Compute metrics for trackers using DETRAC challenge ground-truth data.

Files
-----
Ground truth files can be in .XML format or .MAT format as provided by http://detrac-db.rit.albany.edu/download
Test files for the challenge are required to be in MOTchallenge format, they have to comply with the format described in

Milan, Anton, et al.
"Mot16: A benchmark for multi-object tracking."
arXiv preprint arXiv:1603.00831 (2016).
https://motchallenge.net/

Directory Structure
---------
Layout for ground truth data
    <GT_ROOT>/<SEQUENCE_1>.txt
    <GT_ROOT>/<SEQUENCE_2>.txt
    ...
OR
    <GT_ROOT>/<SEQUENCE_1>.mat
    <GT_ROOT>/<SEQUENCE_2>.mat
    ...

Layout for test data
    <TEST_ROOT>/<SEQUENCE_1>.txt
    <TEST_ROOT>/<SEQUENCE_2>.txt
    ...

Sequences of ground truth and test will be matched according to the `<SEQUENCE_X>`
string.""", formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('groundtruths', type=str, help='Directory containing ground truth files.')
    parser.add_argument('tests', type=str, help='Directory containing tracker result files')
    parser.add_argument('--loglevel', type=str, help='Log level', default='info')
    parser.add_argument('--gtfmt', type=str, help='Groundtruth data format', default='detrac-xml')
    parser.add_argument('--tsfmt', type=str, help='Test data format', default='mot15-2D')
    parser.add_argument('--solver', type=str, help='LAP solver to use')
    return parser.parse_args()

def compare_dataframes(gts, ts):
    """Accumulate MOT events for every tracker sequence that has ground truth.

    Params
    ------
    gts : dict
        Maps sequence name to a ground-truth dataframe.
    ts : dict
        Maps sequence name to a tracker-result dataframe.

    Returns
    ------
    (list, list)
        Event accumulators and the matching sequence names, in `ts` order.
        Sequences without ground truth are skipped with a warning.
    """
    accumulators = []
    matched_names = []
    for name, tracker_df in ts.items():
        # Skip tracker outputs we cannot score.
        if name not in gts:
            logging.warning('No ground truth for {}, skipping.'.format(name))
            continue
        logging.info('Comparing {}...'.format(name))
        accumulators.append(mm.utils.compare_to_groundtruth(gts[name], tracker_df, 'iou', distth=0.5))
        matched_names.append(name)

    return accumulators, matched_names

if __name__ == '__main__':

    args = parse_args()

    # Map the user-supplied level name (e.g. 'info') onto logging's numeric constant.
    numeric_level = getattr(logging, args.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: {} '.format(args.loglevel))
    logging.basicConfig(level=numeric_level, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')

    if args.solver:
        mm.lap.default_solver = args.solver

    gt_files = glob.glob(os.path.join(args.groundtruths, '*'))
    ts_files = glob.glob(os.path.join(args.tests, '*'))

    logging.info('Found {} groundtruths and {} test files.'.format(len(gt_files), len(ts_files)))
    logging.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
    logging.info("Default LAP solver '{}'".format(mm.lap.default_solver))
    logging.info('Loading files.')

    def _load_all(files, fmt):
        # Key each sequence by its file stem so ground-truth and test entries match up.
        return OrderedDict((os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt=fmt)) for f in files)

    gt = _load_all(gt_files, args.gtfmt)
    ts = _load_all(ts_files, args.tsfmt)

    mh = mm.metrics.create()
    accs, names = compare_dataframes(gt, ts)

    logging.info('Running metrics')

    summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)
    print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))
    logging.info('Completed')
Binary file added motmetrics/data/iotest/detrac.mat
Binary file not shown.
41 changes: 41 additions & 0 deletions motmetrics/data/iotest/detrac.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
<?xml version="1.0" encoding="utf-8"?>
<sequence name="MVI_39031">
<sequence_attribute camera_state="unstable" sence_weather="sunny"/>
<ignored_region>
<box left="335.75" top="52.75" width="256.5" height="117.5"/>
<box left="0.5" top="296.75" width="223.75" height="120.5"/>
<box left="690.75" top="116.75" width="269.75" height="94.5"/>
</ignored_region>
<frame density="1" num="1">
<target_list>
<target id="1">
<box left="745.6" top="357.33" width="148.2" height="115.14"/>
<attribute orientation="222.06" speed="11.782" trajectory_length="336" truncation_ratio="0" vehicle_type="car"/>
</target>
</target_list>
</frame>
<frame density="1" num="2">
<target_list>
<target id="1">
<box left="739.2" top="350.51" width="145.21" height="111.29"/>
<attribute orientation="222.06" speed="11.782" trajectory_length="336" truncation_ratio="0" vehicle_type="car"/>
</target>
</target_list>
</frame>
<frame density="1" num="3">
<target_list>
<target id="1">
<box left="732.8" top="343.68" width="142.23" height="107.45"/>
<attribute orientation="222.06" speed="11.782" trajectory_length="336" truncation_ratio="0" vehicle_type="car"/>
</target>
</target_list>
</frame>
<frame density="1" num="4">
<target_list>
<target id="1">
<box left="726.4" top="336.85" width="139.24" height="103.62"/>
<attribute orientation="222.06" speed="11.782" trajectory_length="336" truncation_ratio="0" vehicle_type="car"/>
</target>
</target_list>
</frame>
</sequence>
137 changes: 136 additions & 1 deletion motmetrics/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@
import pandas as pd
import numpy as np
import io
import scipy.io
import xmltodict

class Format(Enum):
"""Enumerates supported file formats."""
Expand All @@ -25,6 +27,17 @@ class Format(Enum):
https://github.com/cvondrick/vatic
"""

DETRAC_MAT = 'detrac-mat'
"""Wen, Longyin et al. "UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking." arXiv preprint arXiv:arXiv:1511.04136 (2016).
http://detrac-db.rit.albany.edu/download
"""

DETRAC_XML = 'detrac-xml'
"""Wen, Longyin et al. "UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking." arXiv preprint arXiv:arXiv:1511.04136 (2016).
http://detrac-db.rit.albany.edu/download
"""



def load_motchallenge(fname, **kwargs):
"""Load MOT challenge data.
Expand Down Expand Up @@ -160,14 +173,136 @@ def load_vatictxt(fname, **kwargs):

return df

def load_detrac_mat(fname, **kwargs):
    """Load UA-DETRAC ground-truth annotations from a .mat file.
    Competition Site: http://detrac-db.rit.albany.edu/download

    The file stores a nested 'gtInfo' struct holding 2d arrays indexed by
    frame id and object id, with separate arrays for the box x-centre,
    bottom y, height and width, plus the list of annotated frame numbers.

    Params
    ------
    fname : str
        Filename to load data from

    Kwargs
    ------
    Currently none of these arguments used.

    Returns
    ------
    df : pandas.DataFrame
        The returned dataframe has the following columns
        'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'
        The dataframe is indexed by ('FrameId', 'Id')
    """

    gt_info = scipy.io.loadmat(fname)['gtInfo'][0][0]
    # Struct fields by position: 0 = box centre x, 1 = box bottom y,
    # 2 = height, 3 = width, 4 = annotated frame numbers.
    x_centre = gt_info[0]
    y_bottom = gt_info[1]
    heights = gt_info[2]
    widths = gt_info[3]
    frames = gt_info[4][0]

    records = []
    for frame in frames:
        for col, x in enumerate(x_centre[frame - 1]):
            # A zero entry means object (col+1) is absent in this frame.
            if x <= 0:
                continue
            records.append([
                frame,
                col + 1,
                x - widths[frame - 1, col] / 2,              # centre x -> left edge
                y_bottom[frame - 1, col] - heights[frame - 1, col],  # bottom y -> top edge
                widths[frame - 1, col],
                heights[frame - 1, col],
                1, -1, -1, -1,
            ])

    df = pd.DataFrame(records,
                      columns=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused'])
    df.set_index(['FrameId', 'Id'], inplace=True)

    # Account for matlab convention.
    df[['X', 'Y']] -= (1, 1)

    # Drop the trailing placeholder column.
    del df['unused']

    return df

def load_detrac_xml(fname, **kwargs):
    """Loads UA-DETRAC annotations data from xml files
    Competition Site: http://detrac-db.rit.albany.edu/download

    Params
    ------
    fname : str
        Filename to load data from

    Kwargs
    ------
    Currently none of these arguments used.

    Returns
    ------
    df : pandas.DataFrame
        The returned dataframe has the following columns
        'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'
        The dataframe is indexed by ('FrameId', 'Id')
    """
    # Use the stdlib parser instead of the third-party xmltodict package.
    # xmltodict returned a bare dict (not a list) for a sequence with a
    # single <frame> or a <target_list> with a single <target>, which made
    # the original loop iterate dict keys and crash; ElementTree's findall
    # always yields a list, so single-element files parse correctly.
    import xml.etree.ElementTree as ET

    root = ET.parse(fname).getroot()

    parsedGT = []
    for frame in root.findall('frame'):
        fid = int(frame.get('num'))
        target_list = frame.find('target_list')
        # A frame may legitimately contain no targets (density 0).
        targets = [] if target_list is None else target_list.findall('target')
        for t in targets:
            box = t.find('box')
            parsedGT.append([
                fid,
                int(t.get('id')),
                float(box.get('left')),
                float(box.get('top')),
                float(box.get('width')),
                float(box.get('height')),
                1, -1, -1, -1,
            ])

    df = pd.DataFrame(parsedGT,
                      columns=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused'])
    df.set_index(['FrameId', 'Id'], inplace=True)

    # Account for matlab convention.
    df[['X', 'Y']] -= (1, 1)

    # Removed trailing column
    del df['unused']

    return df


def loadtxt(fname, fmt=Format.MOT15_2D, **kwargs):
    """Load data from any known format."""
    # Normalize string format names (e.g. 'detrac-xml') to enum members;
    # Format(...) raises ValueError for anything unknown.
    fmt = Format(fmt)

    loaders = {
        Format.MOT16: load_motchallenge,
        Format.MOT15_2D: load_motchallenge,
        Format.VATIC_TXT: load_vatictxt,
        Format.DETRAC_MAT: load_detrac_mat,
        Format.DETRAC_XML: load_detrac_xml,
    }
    return loaders[fmt](fname, **kwargs)
Expand Down
26 changes: 25 additions & 1 deletion motmetrics/tests/test_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,4 +28,28 @@ def test_load_motchallenge():
(2,4,199,205,55,137,1,-1,-1),
])

assert (df.reset_index().values == expected.values).all()
assert (df.reset_index().values == expected.values).all()

def test_load_detrac_mat():
    """The DETRAC .mat fixture parses into the expected MOT-style dataframe."""
    mat_path = os.path.join(DATA_DIR, 'iotest/detrac.mat')
    df = mm.io.loadtxt(mat_path, fmt=mm.io.Format.DETRAC_MAT)

    # One car tracked over four frames; coordinates already 0-based.
    expected = pd.DataFrame([
        (1., 1., 745., 356., 148., 115., 1., -1., -1.),
        (2., 1., 738., 350., 145., 111., 1., -1., -1.),
        (3., 1., 732., 343., 142., 107., 1., -1., -1.),
        (4., 1., 725., 336., 139., 104., 1., -1., -1.),
    ])

    actual = df.reset_index().values
    assert (actual == expected.values).all()

def test_load_detrac_xml():
    """The DETRAC .xml fixture parses into the expected MOT-style dataframe."""
    xml_path = os.path.join(DATA_DIR, 'iotest/detrac.xml')
    df = mm.io.loadtxt(xml_path, fmt=mm.io.Format.DETRAC_XML)

    # One car tracked over four frames; coordinates already 0-based.
    expected = pd.DataFrame([
        (1., 1., 744.6, 356.33, 148.2, 115.14, 1., -1., -1.),
        (2., 1., 738.2, 349.51, 145.21, 111.29, 1., -1., -1.),
        (3., 1., 731.8, 342.68, 142.23, 107.45, 1., -1., -1.),
        (4., 1., 725.4, 335.85, 139.24, 103.62, 1., -1., -1.),
    ])

    actual = df.reset_index().values
    assert (actual == expected.values).all()
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
numpy>=1.12.1
pandas>=0.23.1
scipy>=0.19.0
xmltodict>=0.12.0