Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Object detection net(eg:faster_rcnn_resnet101) not worked with deconv_visualization #59

Closed
jidebingfeng opened this issue Jul 25, 2018 · 14 comments

Comments

@jidebingfeng
Copy link

jidebingfeng commented Jul 25, 2018

Object detection net(eg:faster_rcnn_resnet101) not worked with deconv_visualization.
With activation_visualization works well.
Error:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-4-9aba9f0fced7> in <module>()
      3                                      layers=layers,
      4                                      path_logdir=os.path.join("Log","Inception5"),
----> 5                                      path_outdir=os.path.join("Output","Inception5"))

/notebooks/workspace/github/tf_cnnvis/tf_cnnvis/tf_cnnvis.py in deconv_visualization(sess_graph_path, value_feed_dict, input_tensor, layers, path_logdir, path_outdir)
    408 def deconv_visualization(sess_graph_path, value_feed_dict, input_tensor = None,  layers = 'r', path_logdir = './Log', path_outdir = "./Output"):
    409     is_success = _get_visualization(sess_graph_path, value_feed_dict, input_tensor = input_tensor, layers = layers, method = "deconv",
--> 410         path_logdir = path_logdir, path_outdir = path_outdir)
    411     return is_success
    412 

/notebooks/workspace/github/tf_cnnvis/tf_cnnvis/tf_cnnvis.py in _get_visualization(sess_graph_path, value_feed_dict, input_tensor, layers, path_logdir, path_outdir, method)
    167                 elif layer != None and layer.lower() in dict_layer.keys():
    168                     layer_type = dict_layer[layer.lower()]
--> 169                     is_success = _visualization_by_layer_type(g, value_feed_dict, input_tensor, layer_type, method, path_logdir, path_outdir)
    170                 else:
    171                     print("Skipping %s . %s is not valid layer name or layer type" % (layer, layer))

/notebooks/workspace/github/tf_cnnvis/tf_cnnvis/tf_cnnvis.py in _visualization_by_layer_type(graph, value_feed_dict, input_tensor, layer_type, method, path_logdir, path_outdir)
    225 
    226     for layer in layers:
--> 227         is_success = _visualization_by_layer_name(graph, value_feed_dict, input_tensor, layer, method, path_logdir, path_outdir)
    228     return is_success
    229 

/notebooks/workspace/github/tf_cnnvis/tf_cnnvis/tf_cnnvis.py in _visualization_by_layer_name(graph, value_feed_dict, input_tensor, layer_name, method, path_logdir, path_outdir)
    289         elif method == "deconv":
    290             # deconvolution
--> 291             results = _deconvolution(graph, sess, op_tensor, X, feed_dict)
    292         elif method == "deepdream":
    293             # deepdream

/notebooks/workspace/github/tf_cnnvis/tf_cnnvis/tf_cnnvis.py in _deconvolution(graph, sess, op_tensor, X, feed_dict)
    335                         c += 1
    336                 if c > 0:
--> 337                     out.extend(sess.run(reconstruct[:c], feed_dict = feed_dict))
    338     return out
    339 def _deepdream(graph, sess, op_tensor, X, feed_dict, layer, path_outdir, path_logdir):

/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    898     try:
    899       result = self._run(None, fetches, feed_dict, options_ptr,
--> 900                          run_metadata_ptr)
    901       if run_metadata:
    902         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1118     # Create a fetch handler to take care of the structure of fetches.
   1119     fetch_handler = _FetchHandler(
-> 1120         self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
   1121 
   1122     # Run request and get response.

/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py in __init__(self, graph, fetches, feeds, feed_handles)
    425     """
    426     with graph.as_default():
--> 427       self._fetch_mapper = _FetchMapper.for_fetch(fetches)
    428     self._fetches = []
    429     self._targets = []

/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py in for_fetch(fetch)
    243     elif isinstance(fetch, (list, tuple)):
    244       # NOTE(touts): This is also the code path for namedtuples.
--> 245       return _ListFetchMapper(fetch)
    246     elif isinstance(fetch, dict):
    247       return _DictFetchMapper(fetch)

/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py in __init__(self, fetches)
    350     """
    351     self._fetch_type = type(fetches)
--> 352     self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
    353     self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
    354 

/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py in <listcomp>(.0)
    350     """
    351     self._fetch_type = type(fetches)
--> 352     self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
    353     self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
    354 

/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py in for_fetch(fetch)
    240     if fetch is None:
    241       raise TypeError('Fetch argument %r has invalid type %r' % (fetch,
--> 242                                                                  type(fetch)))
    243     elif isinstance(fetch, (list, tuple)):
    244       # NOTE(touts): This is also the code path for namedtuples.

TypeError: Fetch argument None has invalid type <class 'NoneType'>

Addition:

Variable reconstruct (defined at line 327 in tf_cnnvis.py ) is [None, None, None, None, None, None, None, None]. TensorFlow version is 1.9. When I use TensorFlow 1.4, I got same Error as #26

@wildpig22
Copy link

What is your input_tensor set to?

Not familiar with faster rcnn, but with mobilenet ssd (from tensorflow object detection model zoo), I got the same error when using image_tensor as input, switching to FeatureExtractor/MobilenetV2/MobilenetV2/input will work.

You can try skipping the pre-processing blocks and set input_tensor to the input of feature extractor block(or whatever the actual input is). You will need to manually do the pre-processing though.

@aggpankaj2
Copy link

aggpankaj2 commented Jul 25, 2018

@wildpig22
Object detection model is not working with deconv_visualization.
With activation_visualization works well.
import argparse
import sys
import os
import numpy as np
import tensorflow as tf
from Save_load_graph import load_graph
from scipy.misc import imread, imresize
import argparse
import cv2
import time
from tensorflow.examples.tutorials.mnist import input_data
from tf_cnnvis import *
def evalFrozenGraphModel(FLAGS):
    """Load a frozen object-detection graph and run deconv visualization on it.

    FLAGS.model_dir must contain 'frozen_inference_graph.pb'.
    """
    # BUG FIX: the original paste was missing the closing parenthesis here.
    graph = load_graph(FLAGS.model_dir + 'frozen_inference_graph.pb')
    X = graph.get_tensor_by_name('image_tensor:0')
    y_predict = graph.get_tensor_by_name("detection_classes:0")
    im = np.expand_dims(imresize(imread(os.path.join("sample_images", "Lenna.png")), (224, 224)), axis=0)
    # BUG FIX: feed_dict and layers were referenced but never defined in the
    # original snippet; define them before use. 'r'/'p'/'c' are the layer-type
    # shortcuts accepted by tf_cnnvis.
    feed_dict = {X: im}
    layers = ['r', 'p', 'c']
    with tf.Session(graph=graph) as sess:
        # is_success = activation_visualization(sess_graph_path=graph, value_feed_dict=feed_dict,
        #                                       layers=layers, path_logdir=os.path.join("Log", "2.9"),
        #                                       path_outdir=os.path.join("Output", "2.9"))
        is_success = deconv_visualization(sess_graph_path=graph, value_feed_dict=feed_dict,
                                          input_tensor=None, layers=layers,
                                          path_logdir=os.path.join("Log", "1"),
                                          path_outdir=os.path.join("Output", "1"))


# BUG FIX: the original read `if name == 'main':` — the dunder names are
# required for the script-entry guard.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NOTE(review): the add_argument call was truncated in the original paste;
    # closed here with the default that was visible.
    parser.add_argument('--model_dir', type=str,
                        default='/resnet101_correcteddata/inference_resnet_1class_1batch/')

Error outer_forward_ctxt = forward_ctxt.outer_context
AttributeError: 'NoneType' object has no attribute 'outer_context'

Tried so many things given here #12 and #26 but still getting error.

@jidebingfeng
Copy link
Author

@wildpig22 Thanks very much! It works! I also read the source code and found that some tensors in the pre-processing stage are not trainable.

@jidebingfeng
Copy link
Author

@aggpankaj2
this is my code. It works.


# Path to the frozen faster_rcnn_resnet101 inference graph.
PATH_TO_CKPT = 'data/tf1.9.5819/frozen_inference_graph.pb'

# Import the frozen graph definition into a dedicated graph.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')


# Feed the image through 'image_tensor:0', but start the deconv reconstruction
# from the feature extractor's input ('.../Pad:0') so the non-trainable
# pre-processing ops are skipped.
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
input_tensor_placeholder = detection_graph.get_tensor_by_name('FirstStageFeatureExtractor/resnet_v1_101/resnet_v1_101/Pad:0')
image = mpimg.imread('data/1.jpg')
image_np_expanded = np.expand_dims(image, axis=0)


layers = ['FirstStageFeatureExtractor/resnet_v1_101/resnet_v1_101/block1/unit_1/bottleneck_v1/conv1/Conv2D',]


# BUG FIX: path_outdir previously pointed at the "Log" directory as well;
# write reconstructed images under "Output" so logs and outputs stay separate.
is_success = deconv_visualization(sess_graph_path='data/tf1.9.5819/model.ckpt.meta', value_feed_dict={image_tensor: image_np_expanded},
                                  input_tensor=input_tensor_placeholder,
                                  layers=layers, path_logdir=os.path.join("Log", "Inception5"),
                                  path_outdir=os.path.join("Output", "Inception5"))

Addition: the TensorFlow version is 1.9.

@jidebingfeng
Copy link
Author

@wildpig22 Did you have any idea about use GPU?
I have two GPUs, but still take large time. GPU-Util is 0%, Memory-Usage is 7857MiB / 8114MiB
Logs:

Reconstruction Completed for FirstStageFeatureExtractor/resnet_v1_101/resnet_v1_101/conv1/Conv2D layer. Time taken = 7.082620 s
Reconstruction Completed for FirstStageFeatureExtractor/resnet_v1_101/resnet_v1_101/block3/unit_1/bottleneck_v1/conv1/Conv2D layer. Time taken = 123.632443 s
Reconstruction Completed for FirstStageFeatureExtractor/resnet_v1_101/resnet_v1_101/block3/unit_9/bottleneck_v1/conv2/Conv2D layer. Time taken = 111.493410 s
Reconstruction Completed for FirstStageFeatureExtractor/resnet_v1_101/resnet_v1_101/block3/unit_17/bottleneck_v1/conv3/Conv2D layer. Time taken = 602.894801 s

@wildpig22
Copy link

@jidebingfeng I haven't figured that out either, sometimes I even have to set clear_devices=True to work...

@jidebingfeng
Copy link
Author

@wildpig22 In fact, it works well with the code os.environ["CUDA_VISIBLE_DEVICES"] = "0". Some layer is close to input_tensor or it has few GPU operation, so GPU-Util is 0%.

@aggpankaj2
Copy link

@jidebingfeng Thank you very much. It works. Have you tried deconv visualization for mobilenetv2?

@jidebingfeng
Copy link
Author

@aggpankaj2 No. I just tried for faster_rcnn_resnet101. I think the deconv_visualization function will work well, if you make sure the layers between input_tensor and the deconv layer are trainable.

@jidebingfeng
Copy link
Author

@aggpankaj2 You can judge whether a layer is trainable by its dtype. That's how I found out the problem with faster_rcnn_resnet101. Some code:

# Print the name and dtype of every input feeding a pooling / relu / conv op;
# a non-float dtype flags tensors that are not trainable.
op_types_of_interest = {'maxpool', 'relu', 'conv2d'}
for op in detection_graph.get_operations():
    if op.type.lower() in op_types_of_interest:
        for in_tensor in op.inputs:
            print(in_tensor.name, in_tensor.dtype)

@aggpankaj2
Copy link

@jidebingfeng thanks for your reply. Yes i tried same as per your suggestion but not working

@wildpig22
Copy link

@aggpankaj2 do you mean mobilenetV2 or mobilenetV2+SSD?

Try this code for mobilenetV2+SSD (tensorflow 1.9.0)

import os
import sys
import time
import copy
import h5py
import numpy as np

from tf_cnnvis import *

import tensorflow as tf
from scipy.misc import imread, imresize

# Load the frozen SSD+MobilenetV2 graph, then re-import it with a fresh
# placeholder grafted in at the feature extractor's input. This bypasses the
# pre-processing ops that made deconv_visualization fail on the full graph.
with tf.gfile.FastGFile('frozen.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
t_input = tf.placeholder(np.float32, name='FeatureExtractor/MobilenetV2/MobilenetV2/input') 
tf.import_graph_def(graph_def, {'FeatureExtractor/MobilenetV2/MobilenetV2/input':t_input})

# Manually reproduce the model's pre-processing: resize to 300x300 and map
# pixel values from [0, 255] into [-1, 1] via im * (2/255) - 1.
im = np.expand_dims(imresize(imread(os.path.join("./images", "image.jpg")), (300, 300)), axis = 0)
mean = 2/255.0
dev = 1.0
im_processed = im * mean - dev

# Layer-type shortcuts accepted by tf_cnnvis — presumably relu ('r'),
# pooling ('p') and convolution ('c') layers; confirm against tf_cnnvis docs.
layers = ["r", "p", "c"]

# Run deconv on the default graph, feeding the pre-processed image directly
# into the grafted placeholder.
is_success = deconv_visualization(sess_graph_path = tf.get_default_graph(), value_feed_dict = {t_input : im_processed}, 
                                      layers=layers,
                                      path_logdir=os.path.join("./Log","ssd"), 
                                      path_outdir=os.path.join("./Output","ssd"))

@aggpankaj2
Copy link

@wildpig22 thanks for your code, but not working.

@wildpig22
Copy link

wildpig22 commented Aug 2, 2018

@aggpankaj2 which model are you using, can you provide a link?

Also, what is the error?

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

3 participants