analyzer: Dump tensors of each subgraph
It makes analyzer output more useful.

Example:

Your TFLite model has '1' subgraph(s). In the subgraph description below,
T# represents the Tensor numbers. For example, in Subgraph#0, the RESHAPE op takes
tensor #0 and tensor #1 as input and produces tensor #5 as output.

Subgraph#0 main(T#0) -> [T#9]
  Op#0 RESHAPE(T#0, T#1) -> [T#5]
  Op#1 STRIDED_SLICE(T#5, T#2, T#2, T#3) -> [T#6]
  Op#2 RESIZE_BILINEAR(T#6, T#4) -> [T#7]
  Op#3 RESIZE_BILINEAR(T#6, T#4) -> [T#8]
  Op#4 ADD(T#7, T#8) -> [T#9]

Tensors of Subgraph#0
  T#0(image) shape:[5, 5], type:FLOAT32
  T#1(strided_slice) shape:[4], type:INT32
  T#2(strided_slice1) shape:[4], type:INT32
  T#3(strided_slice2) shape:[4], type:INT32
  T#4(ResizeBilinear/size) shape:[2], type:INT32
  T#5(strided_slice3) shape:[1, 5, 1, 5], type:FLOAT32
  T#6(strided_slice4) shape:[1, 5, 5, 1], type:FLOAT32
  T#7(ResizeBilinear) shape:[1, 2, 2, 1], type:FLOAT32
  T#8(ResizeBilinear_1) shape:[1, 2, 2, 1], type:FLOAT32
  T#9(Identity) shape:[1, 2, 2, 1], type:FLOAT32

PiperOrigin-RevId: 389795468
Change-Id: I0fda5bb74568c68459359a8a39f1627b459b7a4b
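
For reference, a listing like the one above can be generated from Python; the exact entry point is an assumption here (the analyzer is exposed as tf.lite.experimental.Analyzer in recent TensorFlow releases, and "model.tflite" is a placeholder path):

  import tensorflow as tf

  # Prints the per-op summary and, with this change, a
  # "Tensors of Subgraph#N" section for each subgraph.
  # "model.tflite" is a placeholder path for your model file.
  tf.lite.experimental.Analyzer.analyze(model_path="model.tflite")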
terryheo authored and tensorflower-gardener committed Aug 10, 2021
1 parent 99f2eb7 commit a1d81c8
64 changes: 55 additions & 9 deletions tensorflow/lite/python/analyzer_wrapper/model_analyzer.cc
@@ -28,9 +28,42 @@ namespace tflite {
 
 namespace {
 
-void dump_tensors(std::stringstream& out_stream,
-                  const flatbuffers::Vector<int32_t>* tensors,
-                  bool verbose = false) {
+// Dump details of the given tensor.
+void dump_tensor_detail(std::stringstream& out_stream,
+                        const tflite::Tensor* tensor, const int tensor_idx) {
+  out_stream << "T#" << tensor_idx;
+  out_stream << "(" << tensor->name()->str() << ") ";
+  // Prints `shape_signature` instead of `shape` if it's available since it
+  // supports dynamic shapes.
+  if (tensor->shape_signature()) {
+    out_stream << "shape_signature:[";
+    for (int i = 0; i < tensor->shape_signature()->Length(); ++i) {
+      const int j = tensor->shape_signature()->Get(i);
+      out_stream << j;
+      if (i != tensor->shape_signature()->Length() - 1) {
+        out_stream << ", ";
+      }
+    }
+    out_stream << "]";
+  } else {
+    out_stream << "shape:[";
+    for (int i = 0; i < tensor->shape()->Length(); ++i) {
+      const int j = tensor->shape()->Get(i);
+      out_stream << j;
+      if (i != tensor->shape()->Length() - 1) {
+        out_stream << ", ";
+      }
+    }
+    out_stream << "]";
+  }
+  out_stream << ", type:" << EnumNameTensorType(tensor->type());
+  out_stream << "\n";
+}
+
+// Dump list of input or output tensors.
+void dump_tensor_list(std::stringstream& out_stream,
+                      const flatbuffers::Vector<int32_t>* tensors,
+                      bool verbose = false) {
   for (int i = 0; i < tensors->Length(); ++i) {
     const int tensor_idx = tensors->Get(i);
     if (verbose) {
@@ -48,6 +81,7 @@ void dump_tensors(std::stringstream& out_stream,
   }
 }
 
+// Returns the string representation of the given OperatorCode.
 const std::string get_op_name(const OperatorCode* op_code) {
   auto builtin_code = GetBuiltinCode(op_code);
   if (builtin_code != BuiltinOperator_CUSTOM) {
@@ -57,17 +91,20 @@ const std::string get_op_name(const OperatorCode* op_code) {
   }
 }
 
+// Dump the given Operator node.
 void dump_node(std::stringstream& out_stream, const int node_no,
                const OperatorCode* op_code, const Operator* op,
                const SubGraph* subgraph) {
   out_stream << "Op#" << node_no << " " << get_op_name(op_code);
   out_stream << "(";
-  dump_tensors(out_stream, op->inputs());
+  dump_tensor_list(out_stream, op->inputs());
   out_stream << ") -> [";
-  dump_tensors(out_stream, op->outputs());
+  dump_tensor_list(out_stream, op->outputs());
   out_stream << "]\n";
 }
 
+// Dump the summary of the given TFLite flatbuffer model. It's printed at the
+// beginning of the analyzer output.
 void dump_model_summary(std::stringstream& out_stream,
                         const ::tflite::Model* model) {
   auto* subgraphs = model->subgraphs();
@@ -81,9 +118,9 @@ void dump_model_summary(std::stringstream& out_stream,
         model->operator_codes()->Get(first_op->opcode_index());
     out_stream << "For example, in Subgraph#0, the "
                << get_op_name(first_op_code) << " op takes\n";
-    dump_tensors(out_stream, first_op->inputs(), /*verbose=*/true);
+    dump_tensor_list(out_stream, first_op->inputs(), /*verbose=*/true);
     out_stream << " as input and produces ";
-    dump_tensors(out_stream, first_op->outputs(), /*verbose=*/true);
+    dump_tensor_list(out_stream, first_op->outputs(), /*verbose=*/true);
     out_stream << " as output.\n\n";
   }
 }
@@ -141,9 +178,9 @@ std::string model_analyzer(const std::string& model_file_or_buffer,
       out_stream << " " << subgraph->name()->str();
     }
     out_stream << "(";
-    dump_tensors(out_stream, subgraph->inputs());
+    dump_tensor_list(out_stream, subgraph->inputs());
     out_stream << ") -> [";
-    dump_tensors(out_stream, subgraph->outputs());
+    dump_tensor_list(out_stream, subgraph->outputs());
     out_stream << "]\n";
     for (int j = 0; j < subgraph->operators()->Length(); ++j) {
       const Operator* op = subgraph->operators()->Get(j);
@@ -169,6 +206,15 @@ std::string model_analyzer(const std::string& model_file_or_buffer,
                  << " with TFLite runtime version " << TF_VERSION_STRING
                  << "\n";
     }
+
+    // Dump Subgraph Tensors.
+    out_stream << "\nTensors of Subgraph#" << i << "\n";
+    auto tensors = subgraph->tensors();
+    for (int j = 0; j < tensors->Length(); ++j) {
+      auto tensor = tensors->Get(j);
+      out_stream << " ";  // indents for tensors
+      dump_tensor_detail(out_stream, tensor, j);
+    }
   }
   if (check_gpu_compatibility && model_is_gpu_compatibile) {
     out_stream
