Faster merging procedure, more detailed metrics analysis and better compatibility with MOT16/17 (#38)

* more like official ver

* renew readme

* make same result as MATLAB

* rm useless

* faster distance matrix & faster IDF1 computing

* ...

* fix short result

* fix situation of nan

* fix nan

* version bump

* fix release readme

* add idt ida

* fix IDt

* fix typo

* fix hypHis

* fix gt number

* speed up merge overall

* add skip func

* add special iou requirement

* fix old tests

* add tests for IDs, IDt, IDa, IDm

* rm false assertion

* change interface back to original design

* fix row names
Helicopt authored and cheind committed Apr 16, 2019
1 parent dfb728d commit 3f8f504
Showing 12 changed files with 804 additions and 121 deletions.
8 changes: 7 additions & 1 deletion Readme.md
@@ -10,7 +10,8 @@ While benchmarking single object trackers is rather straightforward, measuring t

<div style="text-align:center;">

![](./motmetrics/etc/mot.png)<br/>

*Pictures courtesy of Bernardin, Keni, and Rainer Stiefelhagen [[1]](#References)*
</div>

@@ -103,6 +104,10 @@ You can compare tracker results to ground truth in MOTChallenge format by
```
python -m motmetrics.apps.eval_motchallenge --help
```
For MOT16/17, you can run
```
python -m motmetrics.apps.evaluateTracking --help
```
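For example, evaluating a hypothetical tracker on MOT17 training data might look like this (the directory layout, seqmap path, and the `mot16` format flag are illustrative assumptions, not fixed conventions):
```
python -m motmetrics.apps.evaluateTracking ./MOT17/train ./results/my_tracker ./seqmaps/train.txt --fmt mot16 --log ./eval_logs
```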

### Installation

@@ -450,6 +455,7 @@ docker run desired-image-name
MIT License
Copyright (c) 2017 Christoph Heindl
Copyright (c) 2018 Toka
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
164 changes: 164 additions & 0 deletions motmetrics/apps/evaluateTracking.py
@@ -0,0 +1,164 @@
"""py-motmetrics - metrics for multiple object tracker (MOT) benchmarking with RESULT PREPROCESS.
TOKA, 2018
ORIGIN: https://github.com/cheind/py-motmetrics
EXTENDED: <repository>
"""

import argparse
import glob
import os
import logging
import motmetrics as mm
import pandas as pd
from collections import OrderedDict
from pathlib import Path
import time
from tempfile import NamedTemporaryFile

def parse_args():
    parser = argparse.ArgumentParser(description="""
Compute metrics for trackers using MOTChallenge ground-truth data with data preprocess.

Files
-----
All file content, ground truth and test files, have to comply with the
format described in

Milan, Anton, et al.
"MOT16: A benchmark for multi-object tracking."
arXiv preprint arXiv:1603.00831 (2016).
https://motchallenge.net/

Structure
---------
Layout for ground truth data
    <GT_ROOT>/<SEQUENCE_1>/gt/gt.txt
    <GT_ROOT>/<SEQUENCE_2>/gt/gt.txt
    ...

Layout for test data
    <TEST_ROOT>/<SEQUENCE_1>.txt
    <TEST_ROOT>/<SEQUENCE_2>.txt
    ...

Seqmap for test data
    [name]
    <SEQUENCE_1>
    <SEQUENCE_2>
    ...

Sequences of ground truth and test will be matched according to the `<SEQUENCE_X>`
string in the seqmap.""", formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('groundtruths', type=str, help='Directory containing ground truth files.')
    parser.add_argument('tests', type=str, help='Directory containing tracker result files')
    parser.add_argument('seqmap', type=str, help='Text file containing all sequence names')
    parser.add_argument('--log', type=str, help='a place to record result and outputfile of mistakes', default='')
    parser.add_argument('--loglevel', type=str, help='Log level', default='info')
    parser.add_argument('--fmt', type=str, help='Data format', default='mot15-2D')
    parser.add_argument('--solver', type=str, help='LAP solver to use')
    parser.add_argument('--skip', type=int, default=0, help='skip frames: n means choosing one frame for every (n+1) frames')
    parser.add_argument('--iou', type=float, default=0.5, help='special IoU threshold requirement for small targets')
    return parser.parse_args()
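
# Illustration (hypothetical sequence names): a seqmap file as consumed by
# parseSequences below. The 'name' header row, blank lines and '#' comments
# are skipped; every remaining row names one sequence:
#
#     name
#     MOT17-02-DPM
#     MOT17-04-FRCNN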

def compare_dataframes(gts, ts, vsflag='', iou=0.5):
    accs = []
    anas = []
    names = []
    for k, tsacc in ts.items():
        if k in gts:
            logging.info('Evaluating {}...'.format(k))
            if vsflag != '':
                fd = open(vsflag + '/' + k + '.log', 'w')
            else:
                fd = ''
            acc, ana = mm.utils.CLEAR_MOT_M(gts[k][0], tsacc, gts[k][1], 'iou', distth=iou, vflag=fd)
            if fd != '':
                fd.close()
            accs.append(acc)
            anas.append(ana)
            names.append(k)
        else:
            logging.warning('No ground truth for {}, skipping.'.format(k))

    return accs, anas, names

def parseSequences(seqmap):
    assert os.path.isfile(seqmap), 'Seqmap %s not found.' % seqmap
    fd = open(seqmap)
    res = []
    for row in fd.readlines():
        row = row.strip()
        if row == '' or row == 'name' or row[0] == '#':
            continue
        res.append(row)
    fd.close()
    return res

def generateSkippedGT(gtfile, skip, fmt):
    # Keep every (skip+1)-th frame of the ground truth and renumber the
    # surviving frames consecutively; the filtered annotations are written
    # to a temporary file whose name is returned.
    tf = NamedTemporaryFile(delete=False, mode='w')
    with open(gtfile) as fd:
        lines = fd.readlines()
        for line in lines:
            arr = line.strip().split(',')
            fr = int(arr[0])
            if fr % (skip + 1) != 1:
                continue
            pos = line.find(',')
            newline = str(fr // (skip + 1) + 1) + line[pos:]
            tf.write(newline)
    tf.close()
    tempfile = tf.name
    return tempfile
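
# Illustration (hypothetical gt rows, skip=1): only frames with fr % 2 == 1
# survive, and frame numbers are compacted:
#     '1,1,912,484,97,109,...' -> kept as frame 1 (1 // 2 + 1)
#     '2,1,912,484,97,109,...' -> dropped (2 % 2 != 1)
#     '3,1,910,480,95,107,...' -> kept as frame 2 (3 // 2 + 1)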


if __name__ == '__main__':

    args = parse_args()

    loglevel = getattr(logging, args.loglevel.upper(), None)
    if not isinstance(loglevel, int):
        raise ValueError('Invalid log level: {}'.format(args.loglevel))
    logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')

    if args.solver:
        mm.lap.default_solver = args.solver

    seqs = parseSequences(args.seqmap)
    gtfiles = [os.path.join(args.groundtruths, i, 'gt/gt.txt') for i in seqs]
    tsfiles = [os.path.join(args.tests, '%s.txt' % i) for i in seqs]

    for gtfile in gtfiles:
        if not os.path.isfile(gtfile):
            logging.error('gt File %s not found.' % gtfile)
            exit(1)
    for tsfile in tsfiles:
        if not os.path.isfile(tsfile):
            logging.error('res File %s not found.' % tsfile)
            exit(1)

    logging.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
    for seq in seqs:
        logging.info('\t%s' % seq)
    logging.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
    logging.info('Default LAP solver \'{}\''.format(mm.lap.default_solver))
    logging.info('Loading files.')

    if args.skip > 0 and 'mot' in args.fmt:
        for i, gtfile in enumerate(gtfiles):
            gtfiles[i] = generateSkippedGT(gtfile, args.skip, fmt=args.fmt)

    gt = OrderedDict([(seqs[i], (mm.io.loadtxt(f, fmt=args.fmt), os.path.join(args.groundtruths, seqs[i], 'seqinfo.ini'))) for i, f in enumerate(gtfiles)])
    ts = OrderedDict([(seqs[i], mm.io.loadtxt(f, fmt=args.fmt)) for i, f in enumerate(tsfiles)])

    mh = mm.metrics.create()
    st = time.time()
    # CLEAR_MOT_M expects a distance threshold; the IoU distance is 1 - IoU,
    # so an IoU requirement of args.iou becomes a distance threshold of 1 - args.iou.
    accs, analysis, names = compare_dataframes(gt, ts, args.log, 1. - args.iou)
    logging.info('adding frames: %.3f seconds.' % (time.time() - st))

    logging.info('Running metrics')

    summary = mh.compute_many(accs, anas=analysis, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)
    print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))
    logging.info('Completed')
37 changes: 28 additions & 9 deletions motmetrics/distances.py
@@ -1,7 +1,9 @@
"""py-motmetrics - metrics for multiple object tracker (MOT) benchmarking.
Christoph Heindl, 2017
Toka, 2018
https://github.com/cheind/py-motmetrics
Fast implementation by TOKA
"""

import numpy as np
@@ -47,6 +49,16 @@ def norm2squared_matrix(objs, hyps, max_d2=float('inf')):
C[C > max_d2] = np.nan
return C

def boxiou(a, b):
    """Computes IoU of two rectangles given as (x, y, w, h)."""
    rx1 = max(a[0], b[0])
    rx2 = min(a[0]+a[2], b[0]+b[2])
    ry1 = max(a[1], b[1])
    ry2 = min(a[1]+a[3], b[1]+b[3])
    if ry2 > ry1 and rx2 > rx1:
        i = (ry2-ry1)*(rx2-rx1)
        u = a[2]*a[3] + b[2]*b[3] - i
        return float(i) / u
    else:
        return 0.0
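
# Sanity check with illustrative values (not part of the original file):
#   boxiou([0, 0, 2, 2], [1, 1, 2, 2])
#   intersection = 1 * 1 = 1, union = 2*2 + 2*2 - 1 = 7  ->  1/7 ~= 0.143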

def iou_matrix(objs, hyps, max_iou=1.):
"""Computes 'intersection over union (IoU)' distance matrix between object and hypothesis rectangles.
@@ -79,9 +91,10 @@ def iou_matrix(objs, hyps, max_iou=1.):
    Distance matrix containing pairwise distances or np.nan.
    """

    #import time
    #st = time.time()
    objs = np.atleast_2d(objs).astype(float)
    hyps = np.atleast_2d(hyps).astype(float)

    if objs.size == 0 or hyps.size == 0:
        return np.empty((0,0))

@@ -95,16 +108,22 @@

    for o in range(objs.shape[0]):
        for h in range(hyps.shape[0]):
            #isect_xy = np.maximum(objs[o, :2], hyps[h, :2])
            #isect_wh = np.maximum(np.minimum(br_objs[o], br_hyps[h]) - isect_xy, 0)
            #isect_a = isect_wh[0]*isect_wh[1]
            #union_a = objs[o, 2]*objs[o, 3] + hyps[h, 2]*hyps[h, 3] - isect_a
            #if union_a != 0:
            #    C[o, h] = 1. - isect_a / union_a
            #else:
            #    C[o, h] = np.nan
            iou = boxiou(objs[o], hyps[h])
            if 1 - iou > max_iou:
                C[o, h] = np.nan
            else:
                C[o, h] = 1 - iou

    #C[C > max_iou] = np.nan
    #print('----'*2,'done',time.time()-st)
    return C
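
# Example with the same illustrative boxes: entries are distances 1 - IoU and
# pairs above max_iou are masked, so with max_iou=0.5
#   iou_matrix([[0, 0, 2, 2]], [[1, 1, 2, 2], [0, 0, 2, 2]])
#   -> [[nan, 0.]]   (1 - 1/7 ~= 0.857 > 0.5 is rejected; an exact match gives 0)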


7 changes: 6 additions & 1 deletion motmetrics/io.py
@@ -4,6 +4,8 @@
Christoph Heindl, 2017
https://github.com/cheind/py-motmetrics
Modified by Toka, 2018
https://github.com/Helicopt/fast-py-MOTMetrics.git
"""

from enum import Enum
@@ -225,6 +227,9 @@ def render_summary(summary, formatters=None, namemap=None, buf=None):
    'num_switches' : 'IDs',
    'num_fragmentations' : 'FM',
    'mota' : 'MOTA',
    'motp' : 'MOTP',
    'num_transfer' : 'IDt',
    'num_ascend' : 'IDa',
    'num_migrate' : 'IDm',
}
"""A list mappings for metric names to comply with MOTChallenge."""
