From c8580300343ca92a31fa58f73831eaabf60971ff Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 11 Sep 2023 22:55:39 -0400 Subject: [PATCH 01/18] fix np.loadtxt DeprecationWarning (#2802) ``` DeprecationWarning: loadtxt(): Parsing an integer via a float is deprecated. To avoid this warning, you can: * make sure the original data is stored as integers. * use the `converters=` keyword argument. If you only use NumPy 1.23 or later, `converters=float` will normally work. * Use `np.loadtxt(...).astype(np.int64)` parsing the file as floating point and then convert it. (On all NumPy versions.) (Deprecated NumPy 1.23) return np.loadtxt(str(self.path), **kwargs) ``` Signed-off-by: Jinzhe Zeng --- deepmd/utils/data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index 485079b08d..57bea00fac 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -580,7 +580,7 @@ def _load_data( return np.float32(0.0), data def _load_type(self, sys_path: DPPath): - atom_type = (sys_path / "type.raw").load_txt(dtype=np.int32, ndmin=1) + atom_type = (sys_path / "type.raw").load_txt(ndmin=1).astype(np.int32) return atom_type def _load_type_mix(self, set_name: DPPath): From b6ff8aaddd7dbd74f00dff97ecedaa84a5f76e75 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 11 Sep 2023 23:15:57 -0400 Subject: [PATCH 02/18] support atomic/relative model deviation in CLI (#2801) Fix #2017. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/infer/model_devi.py | 144 +++++++++++++++++++++++++++--- deepmd_cli/main.py | 16 ++++ doc/test/model-deviation.md | 11 +++ doc/third-party/lammps-command.md | 2 +- pyproject.toml | 2 +- source/tests/test_model_devi.py | 89 ++++++++++++++++++ 6 files changed, 248 insertions(+), 16 deletions(-) diff --git a/deepmd/infer/model_devi.py b/deepmd/infer/model_devi.py index e9950f9d5e..8c329a0845 100644 --- a/deepmd/infer/model_devi.py +++ b/deepmd/infer/model_devi.py @@ -2,6 +2,7 @@ from typing import ( Optional, Tuple, + overload, ) import numpy as np @@ -20,10 +21,39 @@ DeepPot, ) +try: + from typing import Literal # python >=3.8 +except ImportError: + from typing_extensions import Literal # type: ignore + + +@overload +def calc_model_devi_f( + fs: np.ndarray, + real_f: Optional[np.ndarray] = None, + relative: Optional[float] = None, + atomic: Literal[False] = False, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + ... + + +@overload +def calc_model_devi_f( + fs: np.ndarray, + real_f: Optional[np.ndarray] = None, + relative: Optional[float] = None, + *, + atomic: Literal[True], +) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + ... + def calc_model_devi_f( - fs: np.ndarray, real_f: Optional[np.ndarray] = None -) -> Tuple[np.ndarray]: + fs: np.ndarray, + real_f: Optional[np.ndarray] = None, + relative: Optional[float] = None, + atomic: bool = False, +) -> Tuple[np.ndarray, ...]: """Calculate model deviation of force. Parameters @@ -33,6 +63,12 @@ def calc_model_devi_f( real_f : numpy.ndarray or None real force, size of `n_frames x n_atoms x 3`. If given, the RMS real error is calculated instead. + relative : float, default: None + If given, calculate the relative model deviation of force. The + value is the level parameter for computing the relative model + deviation of the force. 
+ atomic : bool, default: False + Whether return deviation of force in all atoms Returns ------- @@ -42,6 +78,8 @@ def calc_model_devi_f( minimum deviation of force in all atoms avg_devi_f : numpy.ndarray average deviation of force in all atoms + fs_devi : numpy.ndarray + deviation of force in all atoms, returned if atomic=True """ if real_f is None: fs_devi = np.linalg.norm(np.std(fs, axis=0), axis=-1) @@ -49,9 +87,21 @@ def calc_model_devi_f( fs_devi = np.linalg.norm( np.sqrt(np.mean(np.square(fs - real_f), axis=0)), axis=-1 ) + if relative is not None: + if real_f is None: + # if real force is not given, the magnitude is calculated from mean value of four models + # See DeepPotModelDevi::compute_relative_std_f + # See also Eq. 71 in DeePMD-kit v2 paepr + magnitude = np.linalg.norm(np.mean(fs, axis=0), axis=-1) + else: + # otherwise, the magnitude is calculated from the real force + magnitude = np.linalg.norm(real_f, axis=-1) + fs_devi /= magnitude + relative max_devi_f = np.max(fs_devi, axis=-1) min_devi_f = np.min(fs_devi, axis=-1) avg_devi_f = np.mean(fs_devi, axis=-1) + if atomic: + return max_devi_f, min_devi_f, avg_devi_f, fs_devi return max_devi_f, min_devi_f, avg_devi_f @@ -86,8 +136,10 @@ def calc_model_devi_e( def calc_model_devi_v( - vs: np.ndarray, real_v: Optional[np.ndarray] = None -) -> Tuple[np.ndarray]: + vs: np.ndarray, + real_v: Optional[np.ndarray] = None, + relative: Optional[float] = None, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """Calculate model deviation of virial. Parameters @@ -97,6 +149,10 @@ def calc_model_devi_v( real_v : numpy.ndarray real virial, size of `n_frames x 9`. If given, the RMS real error is calculated instead. + relative : float, default: None + If given, calculate the relative model deviation of virial. The + value is the level parameter for computing the relative model + deviation of the virial. Returns ------- @@ -111,13 +167,25 @@ def calc_model_devi_v( vs_devi = np.std(vs, axis=0) else: vs_devi = np.sqrt(np.mean(np.square(vs - real_v), axis=0)) + if relative is not None: + if real_v is None: + # if real virial is not given, the magnitude is calculated from mean value of four models + # See DeepPotModelDevi::compute_relative_std_v + # See also Eq. 72 in DeePMD-kit v2 paepr + magnitude = np.linalg.norm(np.mean(vs, axis=0), axis=-1) + else: + # otherwise, the magnitude is calculated from the real virial + magnitude = np.linalg.norm(real_v, axis=-1) + vs_devi /= magnitude + relative max_devi_v = np.max(vs_devi, axis=-1) min_devi_v = np.min(vs_devi, axis=-1) avg_devi_v = np.linalg.norm(vs_devi, axis=-1) / 3 return max_devi_v, min_devi_v, avg_devi_v -def write_model_devi_out(devi: np.ndarray, fname: str, header: str = ""): +def write_model_devi_out( + devi: np.ndarray, fname: str, header: str = "", atomic: bool = False +): """Write output of model deviation. 
Parameters @@ -128,8 +196,13 @@ def write_model_devi_out(devi: np.ndarray, fname: str, header: str = ""): the file name to dump header : str, default="" the header to dump + atomic : bool, default: False + whether atomic model deviation is printed """ - assert devi.shape[1] == 8 + if not atomic: + assert devi.shape[1] == 8 + else: + assert devi.shape[1] > 8 header = "%s\n%10s" % (header, "step") for item in "vf": header += "%19s%19s%19s" % ( @@ -138,11 +211,13 @@ def write_model_devi_out(devi: np.ndarray, fname: str, header: str = ""): f"avg_devi_{item}", ) header += "%19s" % "devi_e" + if atomic: + header += "%19s" % "atm_devi_f(N)" with open(fname, "ab") as fp: np.savetxt( fp, devi, - fmt=["%12d"] + ["%19.6e" for _ in range(7)], + fmt=["%12d"] + ["%19.6e" for _ in range(devi.shape[1] - 1)], delimiter="", header=header, ) @@ -175,6 +250,9 @@ def calc_model_devi( fparam: Optional[np.ndarray] = None, aparam: Optional[np.ndarray] = None, real_data: Optional[dict] = None, + atomic: bool = False, + relative: Optional[float] = None, + relative_v: Optional[float] = None, ): """Python interface to calculate model deviation. @@ -200,6 +278,16 @@ def calc_model_devi( atomic specific parameters real_data : dict, optional real data to calculate RMS real error + atomic : bool, default: False + If True, calculate the force model deviation of each atom. + relative : float, default: None + If given, calculate the relative model deviation of force. The + value is the level parameter for computing the relative model + deviation of the force. + relative_v : float, default: None + If given, calculate the relative model deviation of virial. The + value is the level parameter for computing the relative model + deviation of the virial. Returns ------- @@ -241,16 +329,26 @@ def calc_model_devi( devi = [np.arange(coord.shape[0]) * frequency] if real_data is None: - devi += list(calc_model_devi_v(virials)) - devi += list(calc_model_devi_f(forces)) + devi += list(calc_model_devi_v(virials, relative=relative_v)) + devi_f = list(calc_model_devi_f(forces, relative=relative, atomic=atomic)) + devi += devi_f[:3] devi.append(calc_model_devi_e(energies)) else: - devi += list(calc_model_devi_v(virials, real_data["virial"])) - devi += list(calc_model_devi_f(forces, real_data["force"])) + devi += list( + calc_model_devi_v(virials, real_data["virial"], relative=relative_v) + ) + devi_f = list( + calc_model_devi_f( + forces, real_data["force"], relative=relative, atomic=atomic + ) + ) + devi += devi_f[:3] devi.append(calc_model_devi_e(energies, real_data["energy"])) devi = np.vstack(devi).T + if atomic: + devi = np.concatenate([devi, devi_f[3]], axis=1) if fname: - write_model_devi_out(devi, fname) + write_model_devi_out(devi, fname, atomic=atomic) return devi @@ -262,6 +360,9 @@ def make_model_devi( output: str, frequency: int, real_error: bool = False, + atomic: bool = False, + relative: Optional[float] = None, + relative_v: Optional[float] = None, **kwargs, ): """Make model deviation calculation. @@ -282,6 +383,16 @@ def make_model_devi( This paramter is used to determine the index in the output file. real_error : bool, default: False If True, calculate the RMS real error instead of model deviation. + atomic : bool, default: False + If True, calculate the force model deviation of each atom. + relative : float, default: None + If given, calculate the relative model deviation of force. The + value is the level parameter for computing the relative model + deviation of the force. 
+ relative_v : float, default: None + If given, calculate the relative model deviation of virial. The + value is the level parameter for computing the relative model + deviation of the virial. **kwargs Arbitrary keyword arguments. """ @@ -305,7 +416,9 @@ def make_model_devi( for system in all_sys: # create data-system - dp_data = DeepmdData(system, set_prefix, shuffle_test=False, type_map=tmap) + dp_data = DeepmdData( + system, set_prefix, shuffle_test=False, type_map=tmap, sort_atoms=False + ) if first_dp.get_dim_fparam() > 0: dp_data.add( "fparam", @@ -385,11 +498,14 @@ def make_model_devi( fparam=fparam, aparam=aparam, real_data=real_data, + atomic=atomic, + relative=relative, + relative_v=relative_v, ) nframes_tot += coord.shape[0] devis.append(devi) devis = np.vstack(devis) devis[:, 0] = np.arange(nframes_tot) * frequency - write_model_devi_out(devis, output, header=system) + write_model_devi_out(devis, output, header=system, atomic=atomic) devis_coll.append(devis) return devis_coll diff --git a/deepmd_cli/main.py b/deepmd_cli/main.py index f707bf7589..94ceb9888d 100644 --- a/deepmd_cli/main.py +++ b/deepmd_cli/main.py @@ -454,6 +454,22 @@ def main_parser() -> argparse.ArgumentParser: default=False, help="Calculate the RMS real error of the model. The real data should be given in the systems.", ) + parser_model_devi.add_argument( + "--atomic", + action="store_true", + default=False, + help="Print the force model deviation of each atom.", + ) + parser_model_devi.add_argument( + "--relative", + type=float, + help="Calculate the relative model deviation of force. The level parameter for computing the relative model deviation of the force should be given.", + ) + parser_model_devi.add_argument( + "--relative_v", + type=float, + help="Calculate the relative model deviation of virial. The level parameter for computing the relative model deviation of the virial should be given.", + ) # * convert models parser_transform = subparsers.add_parser( diff --git a/doc/test/model-deviation.md b/doc/test/model-deviation.md index 41cda9ddb7..6a89d7c2f4 100644 --- a/doc/test/model-deviation.md +++ b/doc/test/model-deviation.md @@ -36,3 +36,14 @@ optional arguments: ``` For more details concerning the definition of model deviation and its application, please refer to [Yuzhi Zhang, Haidi Wang, Weijie Chen, Jinzhe Zeng, Linfeng Zhang, Han Wang, and Weinan E, DP-GEN: A concurrent learning platform for the generation of reliable deep learning based potential energy models, Computer Physics Communications, 2020, 253, 107206.](https://doi.org/10.1016/j.cpc.2020.107206) + +## Relative model deviation + +By default, the model deviation is output in absolute value. If the argument `--relative` is passed, then the relative model deviation of the force will be output, including values output by the argument `--atomic`. The relative model deviation of the force on atom $i$ is defined by + +$$E_{f_i}=\frac{\left|D_{f_i}\right|}{\left|f_i\right|+l}$$ + +where $D_{f_i}$ is the absolute model deviation of the force on atom $i$, $f_i$ is the norm of the force and $l$ is provided as the parameter of the keyword `relative`. 
+If the argument `--relative_v` is set, then the relative model deviation of the virial will be output instead of the absolute value, with the same definition of that of the force: + +$$E_{v_i}=\frac{\left|D_{v_i}\right|}{\left|v_i\right|+l}$$ diff --git a/doc/third-party/lammps-command.md b/doc/third-party/lammps-command.md index 15acb2e497..e1d482381f 100644 --- a/doc/third-party/lammps-command.md +++ b/doc/third-party/lammps-command.md @@ -40,7 +40,7 @@ and the model deviation will be computed among all models every `out_freq` times fparam_from_compute value = id id = compute id used to update the frame parameter. atomic = no value is required. - If this keyword is set, the model deviation of each atom will be output. + If this keyword is set, the force model deviation of each atom will be output. relative value = level level = The level parameter for computing the relative model deviation of the force relative_v value = level diff --git a/pyproject.toml b/pyproject.toml index 7b8f55d562..b169a3b0eb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ dependencies = [ 'pyyaml', 'dargs >= 0.3.5', 'python-hostlist >= 1.21', - 'typing_extensions; python_version < "3.7"', + 'typing_extensions; python_version < "3.8"', 'importlib_metadata>=1.4; python_version < "3.8"', 'h5py', 'wcmatch', diff --git a/source/tests/test_model_devi.py b/source/tests/test_model_devi.py index 91c95af46c..c7d050cd76 100644 --- a/source/tests/test_model_devi.py +++ b/source/tests/test_model_devi.py @@ -113,6 +113,95 @@ def test_make_model_devi_real_erorr(self): 6, ) + def test_make_model_devi_atomic_relative(self): + _, expected_f, expected_v = self.graphs[0].eval( + self.coord[0], self.box[0], self.atype + ) + _, expected_f2, expected_v2 = self.graphs[1].eval( + self.coord[0], self.box[0], self.atype + ) + expected_f = expected_f.reshape((-1, 3)) + expected_f2 = expected_f2.reshape((-1, 3)) + expected_v = expected_v.reshape((-1, 3)) + expected_v2 = expected_v2.reshape((-1, 3)) + relative = 1.0 + make_model_devi( + models=self.graph_dirs, + system=self.data_dir, + set_prefix="set", + output=self.output, + frequency=self.freq, + atomic=True, + relative=relative, + ) + md = np.loadtxt(self.output) + # copy from lammps test + norm = np.linalg.norm(np.mean([expected_f, expected_f2], axis=0), axis=1) + expected_md_f = np.linalg.norm( + np.std([expected_f, expected_f2], axis=0), axis=1 + ) + expected_md_f /= norm + relative + np.testing.assert_allclose(md[8:], expected_md_f, 6) + np.testing.assert_allclose(md[7], self.expect[7], 6) + np.testing.assert_allclose(md[4], np.max(expected_md_f), 6) + np.testing.assert_allclose(md[5], np.min(expected_md_f), 6) + np.testing.assert_allclose(md[6], np.mean(expected_md_f), 6) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) + / 6 + ) + np.testing.assert_allclose(md[1], np.max(expected_md_v), 6) + np.testing.assert_allclose(md[2], np.min(expected_md_v), 6) + np.testing.assert_allclose(md[3], np.sqrt(np.mean(np.square(expected_md_v))), 6) + + def test_make_model_devi_atomic_relative_v(self): + _, expected_f, expected_v = self.graphs[0].eval( + self.coord[0], self.box[0], self.atype + ) + _, expected_f2, expected_v2 = self.graphs[1].eval( + self.coord[0], self.box[0], self.atype + ) + expected_f = expected_f.reshape((-1, 3)) + expected_f2 = expected_f2.reshape((-1, 3)) + expected_v = expected_v.reshape((-1, 3)) + expected_v2 = expected_v2.reshape((-1, 3)) + relative = 1.0 + make_model_devi( + models=self.graph_dirs, + 
system=self.data_dir, + set_prefix="set", + output=self.output, + frequency=self.freq, + atomic=True, + relative_v=relative, + ) + md = np.loadtxt(self.output) + # copy from lammps test + expected_md_f = np.linalg.norm( + np.std([expected_f, expected_f2], axis=0), axis=1 + ) + np.testing.assert_allclose(md[8:], expected_md_f, 6) + np.testing.assert_allclose(md[7], self.expect[7], 6) + np.testing.assert_allclose(md[4], np.max(expected_md_f), 6) + np.testing.assert_allclose(md[5], np.min(expected_md_f), 6) + np.testing.assert_allclose(md[6], np.mean(expected_md_f), 6) + expected_md_v = ( + np.std([np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0) + / 6 + ) + norm = ( + np.abs( + np.mean( + [np.sum(expected_v, axis=0), np.sum(expected_v2, axis=0)], axis=0 + ) + ) + / 6 + ) + expected_md_v /= norm + relative + np.testing.assert_allclose(md[1], np.max(expected_md_v), 6) + np.testing.assert_allclose(md[2], np.min(expected_md_v), 6) + np.testing.assert_allclose(md[3], np.sqrt(np.mean(np.square(expected_md_v))), 6) + def tearDown(self): for pb in self.graph_dirs: os.remove(pb) From 89d0278bc6608a50442cb52760594a51c554962a Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 11 Sep 2023 23:19:21 -0400 Subject: [PATCH 03/18] docs: add doc to install cmake (#2805) Signed-off-by: Jinzhe Zeng --- doc/install/install-from-source.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/install/install-from-source.md b/doc/install/install-from-source.md index dd28d86ae5..4d75d484ec 100644 --- a/doc/install/install-from-source.md +++ b/doc/install/install-from-source.md @@ -166,6 +166,13 @@ cd $deepmd_source_dir/source mkdir build cd build ``` + +The installation requires CMake 3.16 or later for the CPU version, CMake 3.23 or later for the CUDA support, and CMake 3.21 or later for the ROCM support. One can install CMake via `pip` if it is not installed or the installed version does not satisfy the requirement: + +```sh +pip install -U cmake +``` + I assume you have activated the TensorFlow Python environment and want to install DeePMD-kit into path `$deepmd_root`, then execute CMake ```bash cmake -DUSE_TF_PYTHON_LIBS=TRUE -DCMAKE_INSTALL_PREFIX=$deepmd_root .. From 13d49bd97c060525957449e9e214c47e3154b6b2 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 11 Sep 2023 23:44:44 -0400 Subject: [PATCH 04/18] docs: add docs for addtional CMake arguments via pip (#2806) Fix #2432. Signed-off-by: Jinzhe Zeng --- doc/install/install-from-source.md | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/install/install-from-source.md b/doc/install/install-from-source.md index 4d75d484ec..e6a4b1a7cb 100644 --- a/doc/install/install-from-source.md +++ b/doc/install/install-from-source.md @@ -78,6 +78,7 @@ One may set the following environment variables before executing `pip`: | ROCM_ROOT | Path | Detected automatically | The path to the ROCM toolkit directory. | | TENSORFLOW_ROOT | Path | Detected automatically | The path to TensorFlow Python library. By default the installer only finds TensorFlow under user site-package directory (`site.getusersitepackages()`) or system site-package directory (`sysconfig.get_path("purelib")`) due to limitation of [PEP-517](https://peps.python.org/pep-0517/). If not found, the latest TensorFlow (or the environment variable `TENSORFLOW_VERSION` if given) from PyPI will be built against.| | DP_ENABLE_NATIVE_OPTIMIZATION | 0, 1 | 0 | Enable compilation optimization for the native machine's CPU type. Do not enable it if generated code will run on different CPUs. 
| +| CMAKE_ARGS | str | - | Additional CMake arguments | To test the installation, one should first jump out of the source directory ``` From 8cd822e2932bd7fd3a3e5fabff7e97fd67ac4f92 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 11 Sep 2023 23:47:31 -0400 Subject: [PATCH 05/18] ignore drdq when generalized force loss is not set (#2807) This PR fixes an error for the example in the `examples` directory. --- deepmd/loss/ener.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/deepmd/loss/ener.py b/deepmd/loss/ener.py index 4cc7619d50..7895fadbf3 100644 --- a/deepmd/loss/ener.py +++ b/deepmd/loss/ener.py @@ -121,13 +121,14 @@ def __init__( ) # drdq: the partial derivative of atomic coordinates w.r.t. generalized coordinates # TODO: could numb_generalized_coord decided from the training data? - add_data_requirement( - "drdq", - self.numb_generalized_coord * 3, - atomic=True, - must=False, - high_prec=False, - ) + if self.has_gf > 0: + add_data_requirement( + "drdq", + self.numb_generalized_coord * 3, + atomic=True, + must=False, + high_prec=False, + ) if self.enable_atom_ener_coeff: add_data_requirement( "atom_ener_coeff", From 445ec23aee974fe5b69f5752a49b2de9b55fd743 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 11:52:56 +0800 Subject: [PATCH 06/18] Bump docker/build-push-action from 4.1.1 to 4.2.1 (#2804) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4.1.1 to 4.2.1.
Release notes (docker/build-push-action v4.2.0 and v4.2.1): Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using `provenance: false`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build_wheel.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml index 5f10a85269..e47d753f1c 100644 --- a/.github/workflows/build_wheel.yml +++ b/.github/workflows/build_wheel.yml @@ -113,7 +113,7 @@ jobs: images: ghcr.io/deepmodeling/deepmd-kit - name: Build and push Docker image - uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 + uses: docker/build-push-action@0a97817b6ade9f46837855d676c4cca3a2471fc9 with: context: source/install/docker push: ${{ github.repository_owner == 'deepmodeling' && github.event_name == 'push' }} From 70ccd41b49397b00f8030e43d361c0fc37e65fdf Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 13 Sep 2023 09:46:25 +0800 Subject: [PATCH 07/18] [pre-commit.ci] pre-commit autoupdate (#2808) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/psf/black: 23.7.0 → 23.9.1](https://github.com/psf/black/compare/23.7.0...23.9.1) - [github.com/astral-sh/ruff-pre-commit: v0.0.287 → v0.0.288](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.287...v0.0.288) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ba11bbcf50..d39f5ec127 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,7 +23,7 @@ repos: - id: check-toml # Python - repo: https://github.com/psf/black - rev: 23.7.0 + rev: 23.9.1 hooks: - id: black-jupyter - repo: https://github.com/PyCQA/isort @@ -33,7 +33,7 @@ repos: files: \.py$ - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. 
- rev: v0.0.287 + rev: v0.0.288 hooks: - id: ruff args: ["--fix"] From 959c1299be968bcf209f7245e4bcdfe28eff185f Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 12 Sep 2023 21:58:08 -0400 Subject: [PATCH 08/18] drop old GCC versions in test (#2812) Signed-off-by: Jinzhe Zeng --- .github/workflows/test_python.yml | 35 +++++++++++++------------------ backend/find_tensorflow.py | 21 +++++++++++++++++++ 2 files changed, 36 insertions(+), 20 deletions(-) diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index 25dded26aa..0ac29a7d9b 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -5,48 +5,43 @@ name: Test Python jobs: testpython: name: Test Python - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: matrix: include: - python: 3.7 - gcc: 5 - tf: 1.14 - - python: 3.7 - gcc: 6 - tf: 1.14 - - python: 3.7 - gcc: 7 - tf: 1.14 - - python: 3.7 - gcc: 8 tf: 1.14 - python: 3.8 - gcc: 8 tf: - python: "3.11" - gcc: 8 tf: - container: ghcr.io/deepmodeling/deepmd-kit-test-environment:py${{ matrix.python }}-gcc${{ matrix.gcc }}-tf${{ matrix.tf }} steps: - - name: work around permission issue - run: git config --global --add safe.directory /__w/deepmd-kit/deepmd-kit - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python }} + cache: 'pip' + - uses: mpi4py/setup-mpi@v1 + if: ${{ matrix.tf == '' }} + with: + mpi: openmpi # https://github.com/pypa/pip/issues/11770 - run: python -m pip install -U "pip>=21.3.1,!=23.0.0" - run: pip install -e .[cpu,test] env: - CC: gcc-${{ matrix.gcc }} - CXX: g++-${{ matrix.gcc }} TENSORFLOW_VERSION: ${{ matrix.tf }} DP_BUILD_TESTING: 1 + - run: pip install horovod mpi4py + if: ${{ matrix.tf == '' }} + env: + HOROVOD_WITH_TENSORFLOW: 1 + HOROVOD_WITHOUT_GLOO: 1 - run: dp --version - run: pytest --cov=deepmd --cov=deepmd_cli source/tests --durations=0 - uses: codecov/codecov-action@v3 with: gcov: true - gcov_executable: gcov-${{ matrix.gcc }} pass: name: Pass testing Python needs: [testpython] diff --git a/backend/find_tensorflow.py b/backend/find_tensorflow.py index 8fe3cedb63..aa75d5ecb4 100644 --- a/backend/find_tensorflow.py +++ b/backend/find_tensorflow.py @@ -112,16 +112,31 @@ def get_tf_requirement(tf_version: str = "") -> dict: if tf_version == "": tf_version = os.environ.get("TENSORFLOW_VERSION", "") + extra_requires = [] + extra_select = {} + if not (tf_version == "" or tf_version in SpecifierSet(">=2.12")): + extra_requires.append("protobuf<3.20") + if tf_version == "" or tf_version in SpecifierSet(">=1.15"): + extra_select["mpi"] = [ + "horovod", + "mpi4py", + ] + else: + extra_select["mpi"] = [] + if tf_version == "": return { "cpu": [ "tensorflow-cpu; platform_machine!='aarch64' and (platform_machine!='arm64' or platform_system != 'Darwin')", "tensorflow; platform_machine=='aarch64' or (platform_machine=='arm64' and platform_system == 'Darwin')", + *extra_requires, ], "gpu": [ "tensorflow", "tensorflow-metal; platform_machine=='arm64' and platform_system == 'Darwin'", + *extra_requires, ], + **extra_select, } elif tf_version in SpecifierSet("<1.15") or tf_version in SpecifierSet( ">=2.0,<2.1" @@ -129,22 +144,28 @@ def get_tf_requirement(tf_version: str = "") -> dict: return { "cpu": [ f"tensorflow=={tf_version}", + *extra_requires, ], "gpu": [ f"tensorflow-gpu=={tf_version}; platform_machine!='aarch64'", f"tensorflow=={tf_version}; platform_machine=='aarch64'", + *extra_requires, ], + **extra_select, } 
else: return { "cpu": [ f"tensorflow-cpu=={tf_version}; platform_machine!='aarch64' and (platform_machine!='arm64' or platform_system != 'Darwin')", f"tensorflow=={tf_version}; platform_machine=='aarch64' or (platform_machine=='arm64' and platform_system == 'Darwin')", + *extra_requires, ], "gpu": [ f"tensorflow=={tf_version}", "tensorflow-metal; platform_machine=='arm64' and platform_system == 'Darwin'", + *extra_requires, ], + **extra_select, } From 3a98751d0e7735eac2b0b6e8176e9c490db2d516 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Sep 2023 20:05:37 +0000 Subject: [PATCH 09/18] Bump actions/checkout from 3 to 4 (#2803) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build_cc.yml | 2 +- .github/workflows/build_wheel.yml | 6 +++--- .github/workflows/package_c.yml | 4 ++-- .github/workflows/test_cc.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build_cc.yml b/.github/workflows/build_cc.yml index c2d2613464..500b305ba9 100644 --- a/.github/workflows/build_cc.yml +++ b/.github/workflows/build_cc.yml @@ -20,7 +20,7 @@ jobs: - variant: clang dp_variant: clang steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: submodules: true - uses: actions/setup-python@v4 diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml index e47d753f1c..3e3a265159 100644 --- a/.github/workflows/build_wheel.yml +++ b/.github/workflows/build_wheel.yml @@ -38,7 +38,7 @@ jobs: platform_id: manylinux_aarch64 dp_variant: cpu steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: submodules: true # https://github.com/pypa/setuptools_scm/issues/480 @@ -60,7 +60,7 @@ jobs: name: Build source distribution runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: submodules: true - uses: actions/setup-python@v4 @@ -94,7 +94,7 @@ jobs: needs: [build_wheels] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/download-artifact@v3 with: name: artifact diff --git a/.github/workflows/package_c.yml b/.github/workflows/package_c.yml index d3be4c9469..ada205be00 100644 --- a/.github/workflows/package_c.yml +++ b/.github/workflows/package_c.yml @@ -9,7 +9,7 @@ jobs: name: Build C library runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Package C library run: ./source/install/docker_package_c.sh # for download and debug @@ -30,7 +30,7 @@ jobs: needs: [build_c] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Download artifact uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/test_cc.yml b/.github/workflows/test_cc.yml index c53921bf60..9404f12937 100644 --- a/.github/workflows/test_cc.yml +++ b/.github/workflows/test_cc.yml @@ -7,7 +7,7 @@ jobs: name: Test C++ runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: '3.11' From a8b31237732ed04841ba48e7304f0edfaf3fd518 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 13 Sep 2023 21:45:08 -0400 Subject: [PATCH 10/18] `ndarray.tostring` -> `ndarray.tobytes` (#2814) `tostring` has been deprecated. 
See https://numpy.org/devdocs/reference/generated/numpy.ndarray.tostring.html Signed-off-by: Jinzhe Zeng --- deepmd/entrypoints/transfer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/entrypoints/transfer.py b/deepmd/entrypoints/transfer.py index dc580fbe0a..535b32ec09 100644 --- a/deepmd/entrypoints/transfer.py +++ b/deepmd/entrypoints/transfer.py @@ -196,7 +196,7 @@ def from_array( ) def from_str(self, tensor: np.ndarray): - self.node.attr["value"].tensor.tensor_content = tensor.tostring() + self.node.attr["value"].tensor.tensor_content = tensor.tobytes() def load_tensor(node: tf.Tensor, dtype_old: type, dtype_new: type) -> np.ndarray: From 58dd3e2449dba7719d5d6921ddd779bfbfc6005e Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 13 Sep 2023 21:45:37 -0400 Subject: [PATCH 11/18] `tf.accumulate_n` -> `tf.add_n` (#2815) `tf.accumulate_n` has been deprecated, and from the source code, I see that `tf.accumulate_n` is just a wrapper of `tf.add_n` since TF 2.1 (https://github.com/tensorflow/tensorflow/commit/292d3094313136b77bb5f444561bc3ffc529b246). See: https://www.tensorflow.org/api_docs/python/tf/math/accumulate_n https://www.tensorflow.org/api_docs/python/tf/math/add_n Signed-off-by: Jinzhe Zeng --- deepmd/descriptor/se_a.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deepmd/descriptor/se_a.py b/deepmd/descriptor/se_a.py index 1349f61464..82df8cc1a3 100644 --- a/deepmd/descriptor/se_a.py +++ b/deepmd/descriptor/se_a.py @@ -1054,8 +1054,8 @@ def _filter( # add zero is meaningless; skip rets.append(ret) start_index += self.sel_a[type_i] - # faster to use accumulate_n than multiple add - xyz_scatter_1 = tf.accumulate_n(rets) + # faster to use add_n than multiple add + xyz_scatter_1 = tf.add_n(rets) else: xyz_scatter_1 = self._filter_lower( type_i, From 7da9aaf075e8dea1eca9a08f99f3917235b55e3b Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 13 Sep 2023 21:46:43 -0400 Subject: [PATCH 12/18] `tf.test.TestCase.test_session` -> `tf.test.TestCase.cached_session` (#2816) `tf.test.TestCase.test_session` is deprecated in TF 1.11. We used it when we still tested TF 1.8, and now it is ok to replace it. 
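For reference, a minimal sketch of the replacement pattern (a standalone, hypothetical test case, not taken from this repository; it assumes TF1-style graph mode via `tensorflow.compat.v1`, which is how these tests run):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()


class CachedSessionExample(tf.test.TestCase):
    def test_add(self):
        x = tf.constant(1.0, dtype=tf.float64)
        y = tf.constant(2.0, dtype=tf.float64)
        # cached_session() is the drop-in replacement for the deprecated
        # test_session(); the session is created once and reused within
        # this test method.
        with self.cached_session() as sess:
            self.assertAllClose(sess.run(x + y), 3.0)
```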
--- source/tests/test_activation_fn_gelu.py | 2 +- source/tests/test_data_large_batch.py | 6 +++--- source/tests/test_data_modifier.py | 2 +- source/tests/test_data_modifier_shuffle.py | 2 +- source/tests/test_descrpt_hybrid.py | 2 +- source/tests/test_descrpt_nonsmth.py | 10 +++++----- source/tests/test_descrpt_se_a_mask.py | 2 +- source/tests/test_descrpt_se_a_type.py | 4 ++-- source/tests/test_descrpt_se_atten.py | 8 ++++---- source/tests/test_descrpt_se_r.py | 10 +++++----- source/tests/test_descrpt_sea_ef.py | 2 +- source/tests/test_descrpt_sea_ef_para.py | 2 +- source/tests/test_descrpt_sea_ef_rot.py | 2 +- source/tests/test_descrpt_sea_ef_vert.py | 2 +- source/tests/test_descrpt_smooth.py | 10 +++++----- source/tests/test_dipole_se_a.py | 2 +- source/tests/test_dipole_se_a_tebd.py | 2 +- source/tests/test_embedding_net.py | 2 +- source/tests/test_ewald.py | 6 +++--- source/tests/test_fitting_dos.py | 2 +- source/tests/test_fitting_ener_type.py | 2 +- source/tests/test_layer_name.py | 2 +- source/tests/test_linear_model.py | 2 +- source/tests/test_model_dos.py | 2 +- source/tests/test_model_loc_frame.py | 2 +- source/tests/test_model_multi.py | 2 +- source/tests/test_model_se_a.py | 6 +++--- source/tests/test_model_se_a_aparam.py | 2 +- source/tests/test_model_se_a_ebd.py | 2 +- source/tests/test_model_se_a_fparam.py | 2 +- source/tests/test_model_se_a_srtab.py | 2 +- source/tests/test_model_se_a_type.py | 2 +- source/tests/test_model_se_atten.py | 12 ++++++------ source/tests/test_model_se_r.py | 2 +- source/tests/test_model_se_t.py | 2 +- source/tests/test_model_spin.py | 2 +- source/tests/test_nvnmd_entrypoints.py | 6 +++--- source/tests/test_nvnmd_op.py | 20 ++++++++++---------- source/tests/test_pairwise_dprc.py | 2 +- source/tests/test_polar_se_a.py | 2 +- source/tests/test_polar_se_a_tebd.py | 2 +- source/tests/test_prod_env_mat.py | 2 +- source/tests/test_prod_force.py | 2 +- source/tests/test_prod_force_grad.py | 2 +- source/tests/test_prod_virial.py | 2 +- source/tests/test_prod_virial_grad.py | 2 +- source/tests/test_tab_nonsmth.py | 2 +- source/tests/test_tab_smooth.py | 2 +- source/tests/test_type_embed.py | 4 ++-- source/tests/test_type_one_side.py | 4 ++-- 50 files changed, 90 insertions(+), 90 deletions(-) diff --git a/source/tests/test_activation_fn_gelu.py b/source/tests/test_activation_fn_gelu.py index 6ecbd0154f..b1c30eeefc 100644 --- a/source/tests/test_activation_fn_gelu.py +++ b/source/tests/test_activation_fn_gelu.py @@ -17,7 +17,7 @@ class TestGelu(tf.test.TestCase): def setUp(self): self.places = 6 - self.sess = self.test_session().__enter__() + self.sess = self.cached_session().__enter__() self.inputs = tf.reshape( tf.constant([0.0, 1.0, 2.0, 3.0], dtype=tf.float64), [-1, 1] ) diff --git a/source/tests/test_data_large_batch.py b/source/tests/test_data_large_batch.py index 3ae46e8cb9..5750f956f8 100644 --- a/source/tests/test_data_large_batch.py +++ b/source/tests/test_data_large_batch.py @@ -180,7 +180,7 @@ def test_data_mixed_type(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) # print(sess.run(model.type_embedding)) @@ -376,7 +376,7 @@ def test_stripped_data_mixed_type(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], 
feed_dict=feed_dict_test) # print(sess.run(model.type_embedding)) @@ -572,7 +572,7 @@ def test_compressible_data_mixed_type(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) # print(sess.run(model.type_embedding)) diff --git a/source/tests/test_data_modifier.py b/source/tests/test_data_modifier.py index dfc602fd92..368a60d68a 100644 --- a/source/tests/test_data_modifier.py +++ b/source/tests/test_data_modifier.py @@ -80,7 +80,7 @@ def _setUp(self): model.build(data) # freeze the graph - with self.test_session() as sess: + with self.cached_session() as sess: init_op = tf.global_variables_initializer() sess.run(init_op) graph = tf.get_default_graph() diff --git a/source/tests/test_data_modifier_shuffle.py b/source/tests/test_data_modifier_shuffle.py index 151caa9e16..9ddbb8ee29 100644 --- a/source/tests/test_data_modifier_shuffle.py +++ b/source/tests/test_data_modifier_shuffle.py @@ -81,7 +81,7 @@ def _setUp(self): model.build(data) # freeze the graph - with self.test_session() as sess: + with self.cached_session() as sess: init_op = tf.global_variables_initializer() sess.run(init_op) graph = tf.get_default_graph() diff --git a/source/tests/test_descrpt_hybrid.py b/source/tests/test_descrpt_hybrid.py index ed39c04307..317f6ea5a0 100644 --- a/source/tests/test_descrpt_hybrid.py +++ b/source/tests/test_descrpt_hybrid.py @@ -115,7 +115,7 @@ def test_descriptor_hybrid(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [model_dout] = sess.run([dout], feed_dict=feed_dict_test) diff --git a/source/tests/test_descrpt_nonsmth.py b/source/tests/test_descrpt_nonsmth.py index 1d503e6c8c..fd3bb0b2f7 100644 --- a/source/tests/test_descrpt_nonsmth.py +++ b/source/tests/test_descrpt_nonsmth.py @@ -160,7 +160,7 @@ class TestNonSmooth(Inter, tf.test.TestCase): def setUp(self): self.places = 5 data = Data() - Inter.setUp(self, data, sess=self.test_session().__enter__()) + Inter.setUp(self, data, sess=self.cached_session().__enter__()) def test_force(self): force_test(self, self, suffix="_se") @@ -180,8 +180,8 @@ def test_pbc(self): data = Data() inter0 = Inter() inter1 = Inter() - inter0.setUp(data, pbc=True, sess=self.test_session().__enter__()) - inter1.setUp(data, pbc=False, sess=self.test_session().__enter__()) + inter0.setUp(data, pbc=True, sess=self.cached_session().__enter__()) + inter1.setUp(data, pbc=False, sess=self.cached_session().__enter__()) inter0.net_w_i = np.copy(np.ones(inter0.ndescrpt)) inter1.net_w_i = np.copy(np.ones(inter1.ndescrpt)) @@ -233,8 +233,8 @@ def test_pbc_small_box(self): data1 = Data(box_scale=2) inter0 = Inter() inter1 = Inter() - inter0.setUp(data0, pbc=True, sess=self.test_session().__enter__()) - inter1.setUp(data1, pbc=False, sess=self.test_session().__enter__()) + inter0.setUp(data0, pbc=True, sess=self.cached_session().__enter__()) + inter1.setUp(data1, pbc=False, sess=self.cached_session().__enter__()) inter0.net_w_i = np.copy(np.ones(inter0.ndescrpt)) inter1.net_w_i = np.copy(np.ones(inter1.ndescrpt)) diff --git a/source/tests/test_descrpt_se_a_mask.py b/source/tests/test_descrpt_se_a_mask.py index 30c514a2cc..85cd1cc2a1 100644 --- a/source/tests/test_descrpt_se_a_mask.py +++ b/source/tests/test_descrpt_se_a_mask.py @@ -277,7 +277,7 @@ def test_descriptor_se_a_mask(self): t_aparam: 
test_data["aparam"][:numb_test, :], is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [op_dout] = sess.run([dout], feed_dict=feed_dict_test) op_dout = op_dout.reshape([-1]) diff --git a/source/tests/test_descrpt_se_a_type.py b/source/tests/test_descrpt_se_a_type.py index b10920b1d4..aeab18f149 100644 --- a/source/tests/test_descrpt_se_a_type.py +++ b/source/tests/test_descrpt_se_a_type.py @@ -120,7 +120,7 @@ def test_descriptor_two_sides(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [model_dout] = sess.run([dout], feed_dict=feed_dict_test) model_dout = model_dout.reshape([-1]) @@ -284,7 +284,7 @@ def test_descriptor_one_side(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [model_dout] = sess.run([dout], feed_dict=feed_dict_test) model_dout = model_dout.reshape([-1]) diff --git a/source/tests/test_descrpt_se_atten.py b/source/tests/test_descrpt_se_atten.py index e49e6ab3e2..76df651a46 100644 --- a/source/tests/test_descrpt_se_atten.py +++ b/source/tests/test_descrpt_se_atten.py @@ -141,7 +141,7 @@ def test_descriptor_two_sides(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [model_dout] = sess.run([dout], feed_dict=feed_dict_test) model_dout = model_dout.reshape([-1]) @@ -318,7 +318,7 @@ def test_descriptor_one_side(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [model_dout] = sess.run([dout], feed_dict=feed_dict_test) model_dout = model_dout.reshape([-1]) @@ -488,7 +488,7 @@ def test_stripped_type_embedding_descriptor_two_sides(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [model_dout] = sess.run([dout], feed_dict=feed_dict_test) model_dout = model_dout.reshape([-1]) @@ -666,7 +666,7 @@ def test_compressible_descriptor_two_sides(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [model_dout] = sess.run([dout], feed_dict=feed_dict_test) model_dout = model_dout.reshape([-1]) diff --git a/source/tests/test_descrpt_se_r.py b/source/tests/test_descrpt_se_r.py index c20515a5fa..779954a545 100644 --- a/source/tests/test_descrpt_se_r.py +++ b/source/tests/test_descrpt_se_r.py @@ -135,7 +135,7 @@ class TestSmooth(Inter, tf.test.TestCase): def setUp(self): self.places = 5 data = Data() - Inter.setUp(self, data, sess=self.test_session().__enter__()) + Inter.setUp(self, data, sess=self.cached_session().__enter__()) def test_force(self): force_test(self, self, suffix="_se_r") @@ -155,8 +155,8 @@ def test_pbc(self): data = Data() inter0 = Inter() inter1 = Inter() - inter0.setUp(data, pbc=True, sess=self.test_session().__enter__()) - inter1.setUp(data, pbc=False, sess=self.test_session().__enter__()) + inter0.setUp(data, pbc=True, sess=self.cached_session().__enter__()) + inter1.setUp(data, pbc=False, sess=self.cached_session().__enter__()) inter0.net_w_i = np.copy(np.ones(inter0.ndescrpt)) inter1.net_w_i = np.copy(np.ones(inter1.ndescrpt)) @@ -208,8 +208,8 @@ 
def test_pbc_small_box(self): data1 = Data(box_scale=2) inter0 = Inter() inter1 = Inter() - inter0.setUp(data0, pbc=True, sess=self.test_session().__enter__()) - inter1.setUp(data1, pbc=False, sess=self.test_session().__enter__()) + inter0.setUp(data0, pbc=True, sess=self.cached_session().__enter__()) + inter1.setUp(data1, pbc=False, sess=self.cached_session().__enter__()) inter0.net_w_i = np.copy(np.ones(inter0.ndescrpt)) inter1.net_w_i = np.copy(np.ones(inter1.ndescrpt)) diff --git a/source/tests/test_descrpt_sea_ef.py b/source/tests/test_descrpt_sea_ef.py index e39afec97e..efd86854c7 100644 --- a/source/tests/test_descrpt_sea_ef.py +++ b/source/tests/test_descrpt_sea_ef.py @@ -154,7 +154,7 @@ class TestSmooth(Inter, tf.test.TestCase): def setUp(self): self.places = 5 data = Data() - Inter.setUp(self, data, sess=self.test_session().__enter__()) + Inter.setUp(self, data, sess=self.cached_session().__enter__()) def test_force(self): force_test(self, self, suffix="_sea_ef") diff --git a/source/tests/test_descrpt_sea_ef_para.py b/source/tests/test_descrpt_sea_ef_para.py index 1ddcc4e196..1a109013cb 100644 --- a/source/tests/test_descrpt_sea_ef_para.py +++ b/source/tests/test_descrpt_sea_ef_para.py @@ -154,7 +154,7 @@ class TestSmooth(Inter, tf.test.TestCase): def setUp(self): self.places = 5 data = Data() - Inter.setUp(self, data, sess=self.test_session().__enter__()) + Inter.setUp(self, data, sess=self.cached_session().__enter__()) def test_force(self): force_test(self, self, suffix="_sea_ef_para") diff --git a/source/tests/test_descrpt_sea_ef_rot.py b/source/tests/test_descrpt_sea_ef_rot.py index 10553b878d..d94565af96 100644 --- a/source/tests/test_descrpt_sea_ef_rot.py +++ b/source/tests/test_descrpt_sea_ef_rot.py @@ -17,7 +17,7 @@ class TestEfRot(tf.test.TestCase): def setUp(self): - self.sess = self.test_session().__enter__() + self.sess = self.cached_session().__enter__() self.natoms = [5, 5, 2, 3] self.ntypes = 2 self.sel_a = [12, 24] diff --git a/source/tests/test_descrpt_sea_ef_vert.py b/source/tests/test_descrpt_sea_ef_vert.py index dcbc418720..77ffb3150c 100644 --- a/source/tests/test_descrpt_sea_ef_vert.py +++ b/source/tests/test_descrpt_sea_ef_vert.py @@ -154,7 +154,7 @@ class TestSmooth(Inter, tf.test.TestCase): def setUp(self): self.places = 5 data = Data() - Inter.setUp(self, data, sess=self.test_session().__enter__()) + Inter.setUp(self, data, sess=self.cached_session().__enter__()) def test_force(self): force_test(self, self, suffix="_sea_ef_vert") diff --git a/source/tests/test_descrpt_smooth.py b/source/tests/test_descrpt_smooth.py index aa0730cdea..59076e366e 100644 --- a/source/tests/test_descrpt_smooth.py +++ b/source/tests/test_descrpt_smooth.py @@ -153,7 +153,7 @@ class TestSmooth(Inter, tf.test.TestCase): def setUp(self): self.places = 5 data = Data() - Inter.setUp(self, data, sess=self.test_session().__enter__()) + Inter.setUp(self, data, sess=self.cached_session().__enter__()) def test_force(self): force_test(self, self, suffix="_smth") @@ -173,8 +173,8 @@ def test_pbc(self): data = Data() inter0 = Inter() inter1 = Inter() - inter0.setUp(data, pbc=True, sess=self.test_session().__enter__()) - inter1.setUp(data, pbc=False, sess=self.test_session().__enter__()) + inter0.setUp(data, pbc=True, sess=self.cached_session().__enter__()) + inter1.setUp(data, pbc=False, sess=self.cached_session().__enter__()) inter0.net_w_i = np.copy(np.ones(inter0.ndescrpt)) inter1.net_w_i = np.copy(np.ones(inter1.ndescrpt)) @@ -226,8 +226,8 @@ def test_pbc_small_box(self): data1 = 
Data(box_scale=2) inter0 = Inter() inter1 = Inter() - inter0.setUp(data0, pbc=True, sess=self.test_session().__enter__()) - inter1.setUp(data1, pbc=False, sess=self.test_session().__enter__()) + inter0.setUp(data0, pbc=True, sess=self.cached_session().__enter__()) + inter1.setUp(data1, pbc=False, sess=self.cached_session().__enter__()) inter0.net_w_i = np.copy(np.ones(inter0.ndescrpt)) inter1.net_w_i = np.copy(np.ones(inter1.ndescrpt)) diff --git a/source/tests/test_dipole_se_a.py b/source/tests/test_dipole_se_a.py index 4e2fa9b30d..687e68c2be 100644 --- a/source/tests/test_dipole_se_a.py +++ b/source/tests/test_dipole_se_a.py @@ -111,7 +111,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [p, gp] = sess.run([dipole, gdipole], feed_dict=feed_dict_test) diff --git a/source/tests/test_dipole_se_a_tebd.py b/source/tests/test_dipole_se_a_tebd.py index f848526735..4b2e6d0688 100644 --- a/source/tests/test_dipole_se_a_tebd.py +++ b/source/tests/test_dipole_se_a_tebd.py @@ -129,7 +129,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [p, gp] = sess.run([dipole, gdipole], feed_dict=feed_dict_test) diff --git a/source/tests/test_embedding_net.py b/source/tests/test_embedding_net.py index f09ef74948..1b8c68c089 100644 --- a/source/tests/test_embedding_net.py +++ b/source/tests/test_embedding_net.py @@ -13,7 +13,7 @@ class Inter(tf.test.TestCase): def setUp(self): - self.sess = self.test_session().__enter__() + self.sess = self.cached_session().__enter__() self.inputs = tf.constant([0.0, 1.0, 2.0], dtype=tf.float64) self.ndata = 3 self.inputs = tf.reshape(self.inputs, [-1, 1]) diff --git a/source/tests/test_ewald.py b/source/tests/test_ewald.py index b6b925f801..ef2ace39a4 100644 --- a/source/tests/test_ewald.py +++ b/source/tests/test_ewald.py @@ -64,7 +64,7 @@ def setUp(self): def test_py_interface(self): hh = 1e-4 places = 4 - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() t_energy, t_force, t_virial = op_module.ewald_recp( self.coord, self.charge, @@ -91,7 +91,7 @@ def test_py_interface(self): def test_force(self): hh = 1e-4 places = 6 - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() t_energy, t_force, t_virial = op_module.ewald_recp( self.coord, self.charge, @@ -144,7 +144,7 @@ def test_force(self): def test_virial(self): hh = 1e-4 places = 6 - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() t_energy, t_force, t_virial = op_module.ewald_recp( self.coord, self.charge, diff --git a/source/tests/test_fitting_dos.py b/source/tests/test_fitting_dos.py index 95de81c32c..60a0ee4158 100644 --- a/source/tests/test_fitting_dos.py +++ b/source/tests/test_fitting_dos.py @@ -180,7 +180,7 @@ def test_fitting(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [pred_atom_dos] = sess.run([atom_dos], feed_dict=feed_dict_test) diff --git a/source/tests/test_fitting_ener_type.py b/source/tests/test_fitting_ener_type.py index 54621b634a..42190ef557 100644 --- a/source/tests/test_fitting_ener_type.py +++ b/source/tests/test_fitting_ener_type.py @@ -188,7 +188,7 @@ def test_fitting(self): is_training: False, } - sess = self.test_session().__enter__() + sess = 
self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [pred_atom_ener] = sess.run([atom_ener], feed_dict=feed_dict_test) diff --git a/source/tests/test_layer_name.py b/source/tests/test_layer_name.py index 6de4a09736..c6a2f0b09c 100644 --- a/source/tests/test_layer_name.py +++ b/source/tests/test_layer_name.py @@ -137,7 +137,7 @@ def test_model(self): is_training: False, } - with self.test_session() as sess: + with self.cached_session() as sess: sess.run(tf.global_variables_initializer()) [e1, f1, v1, e2, f2, v2] = sess.run( [e_energy1, e_force1, e_virial1, e_energy2, e_force2, e_virial2], diff --git a/source/tests/test_linear_model.py b/source/tests/test_linear_model.py index 13a2bc4850..21f0f6efc8 100644 --- a/source/tests/test_linear_model.py +++ b/source/tests/test_linear_model.py @@ -94,7 +94,7 @@ def test_linear_ener_model(self): t_mesh: test_data["default_mesh"], is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) e = np.reshape(e, [1, -1]) diff --git a/source/tests/test_model_dos.py b/source/tests/test_model_dos.py index 3562a5b9f9..c7160d4dda 100644 --- a/source/tests/test_model_dos.py +++ b/source/tests/test_model_dos.py @@ -116,7 +116,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [pred_dos, pred_atom_dos] = sess.run([dos, atom_dos], feed_dict=feed_dict_test) diff --git a/source/tests/test_model_loc_frame.py b/source/tests/test_model_loc_frame.py index ed0fc3815a..c493013316 100644 --- a/source/tests/test_model_loc_frame.py +++ b/source/tests/test_model_loc_frame.py @@ -114,7 +114,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) diff --git a/source/tests/test_model_multi.py b/source/tests/test_model_multi.py index 384f1e0553..9017da22e7 100644 --- a/source/tests/test_model_multi.py +++ b/source/tests/test_model_multi.py @@ -141,7 +141,7 @@ def test_model(self): t_mesh: test_data["default_mesh"], is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() # test water energy sess.run(tf.global_variables_initializer()) diff --git a/source/tests/test_model_se_a.py b/source/tests/test_model_se_a.py index 65e42f43a0..d3b4323f0d 100644 --- a/source/tests/test_model_se_a.py +++ b/source/tests/test_model_se_a.py @@ -123,7 +123,7 @@ def test_model_atom_ener(self): t_mesh: test_data["default_mesh"], is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) self.assertAlmostEqual(e[0], set_atom_ener[0], places=10) @@ -212,7 +212,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) @@ -347,7 +347,7 @@ def test_model_atom_ener_type_embedding(self): t_mesh: test_data["default_mesh"], is_training: False, } - sess = self.test_session().__enter__() + sess = 
self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) self.assertAlmostEqual(e[0], set_atom_ener[0], places=10) diff --git a/source/tests/test_model_se_a_aparam.py b/source/tests/test_model_se_a_aparam.py index b236320d24..41111c57ee 100644 --- a/source/tests/test_model_se_a_aparam.py +++ b/source/tests/test_model_se_a_aparam.py @@ -115,7 +115,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) diff --git a/source/tests/test_model_se_a_ebd.py b/source/tests/test_model_se_a_ebd.py index 96de277d2f..bf856b7bc5 100644 --- a/source/tests/test_model_se_a_ebd.py +++ b/source/tests/test_model_se_a_ebd.py @@ -115,7 +115,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) diff --git a/source/tests/test_model_se_a_fparam.py b/source/tests/test_model_se_a_fparam.py index fad41947e2..cdb85157a4 100644 --- a/source/tests/test_model_se_a_fparam.py +++ b/source/tests/test_model_se_a_fparam.py @@ -116,7 +116,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) diff --git a/source/tests/test_model_se_a_srtab.py b/source/tests/test_model_se_a_srtab.py index ff91af619b..98cab9e073 100644 --- a/source/tests/test_model_se_a_srtab.py +++ b/source/tests/test_model_se_a_srtab.py @@ -140,7 +140,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) diff --git a/source/tests/test_model_se_a_type.py b/source/tests/test_model_se_a_type.py index 63d0ae279c..85e4a2916d 100644 --- a/source/tests/test_model_se_a_type.py +++ b/source/tests/test_model_se_a_type.py @@ -121,7 +121,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) # print(sess.run(model.type_embedding)) diff --git a/source/tests/test_model_se_atten.py b/source/tests/test_model_se_atten.py index 6e6e9928a6..445959ceb2 100644 --- a/source/tests/test_model_se_atten.py +++ b/source/tests/test_model_se_atten.py @@ -132,7 +132,7 @@ def test_model(self): t_mesh: test_data["default_mesh"], is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) # print(sess.run(model.type_embedding)) @@ -258,7 +258,7 @@ def test_exclude_types(self): is_training: False, } - with self.test_session() as sess: + with self.cached_session() as sess: sess.run(tf.global_variables_initializer()) [des] = sess.run([dout], feed_dict=feed_dict_test1) @@ -357,7 +357,7 @@ def test_compressible_model(self): t_mesh: test_data["default_mesh"], is_training: False, } - sess = 
self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) # print(sess.run(model.type_embedding)) @@ -485,7 +485,7 @@ def test_compressible_exclude_types(self): is_training: False, } - with self.test_session() as sess: + with self.cached_session() as sess: sess.run(tf.global_variables_initializer()) [des] = sess.run([dout], feed_dict=feed_dict_test1) @@ -587,7 +587,7 @@ def test_stripped_type_embedding_model(self): t_mesh: test_data["default_mesh"], is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) # print(sess.run(model.type_embedding)) @@ -719,7 +719,7 @@ def test_stripped_type_embedding_exclude_types(self): is_training: False, } - with self.test_session() as sess: + with self.cached_session() as sess: sess.run(tf.global_variables_initializer()) [des] = sess.run([dout], feed_dict=feed_dict_test1) diff --git a/source/tests/test_model_se_r.py b/source/tests/test_model_se_r.py index 01151d8c30..94812308c6 100644 --- a/source/tests/test_model_se_r.py +++ b/source/tests/test_model_se_r.py @@ -111,7 +111,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) diff --git a/source/tests/test_model_se_t.py b/source/tests/test_model_se_t.py index 300ad46a0a..1d67e852c7 100644 --- a/source/tests/test_model_se_t.py +++ b/source/tests/test_model_se_t.py @@ -109,7 +109,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) diff --git a/source/tests/test_model_spin.py b/source/tests/test_model_spin.py index a264f38616..9bdf1d780a 100644 --- a/source/tests/test_model_spin.py +++ b/source/tests/test_model_spin.py @@ -122,7 +122,7 @@ def test_model_spin(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [out_ener, out_force, out_virial] = sess.run( [energy, force, virial], feed_dict=feed_dict_test diff --git a/source/tests/test_nvnmd_entrypoints.py b/source/tests/test_nvnmd_entrypoints.py index af0cd48146..3e721516f1 100644 --- a/source/tests/test_nvnmd_entrypoints.py +++ b/source/tests/test_nvnmd_entrypoints.py @@ -454,7 +454,7 @@ def test_model_qnn_v0(self): dic_ph["default_mesh"]: mesh_dat, } # - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) # get tensordic keys = "o_descriptor,o_rmat,o_energy".split(",") @@ -762,7 +762,7 @@ def test_model_qnn_v1(self): dic_ph["default_mesh"]: mesh_dat, } # - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) # get tensordic keys = "o_descriptor,o_rmat,o_energy".split(",") @@ -818,7 +818,7 @@ def test_model_qnn_v1(self): ref_dout = 60.73941362 np.testing.assert_almost_equal(pred, ref_dout, 8) # test freeze - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() weight_file1 = str(tests_path / "nvnmd" / 
"ref" / "weight_v1_cnn.npy") weight_file2 = str(tests_path / "nvnmd" / "out" / "weight_v1_qnn.npy") save_weight(sess, weight_file2) diff --git a/source/tests/test_nvnmd_op.py b/source/tests/test_nvnmd_op.py index 2b59b9ef94..3419b375e4 100644 --- a/source/tests/test_nvnmd_op.py +++ b/source/tests/test_nvnmd_op.py @@ -17,7 +17,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() def test_op(self): # graph @@ -110,7 +110,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() def test_op(self): # graph @@ -140,7 +140,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() def test_op(self): # graph @@ -166,7 +166,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() def test_op(self): # graph @@ -192,7 +192,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() def test_op(self): # graph @@ -238,7 +238,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() def test_op(self): # graph @@ -284,7 +284,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() def test_op(self): # graph @@ -330,7 +330,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() def test_op(self): # graph @@ -376,7 +376,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() def test_op(self): # graph @@ -402,7 +402,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() def test_op(self): # graph diff --git a/source/tests/test_pairwise_dprc.py b/source/tests/test_pairwise_dprc.py index 2ea5888b60..04aaa237b1 100644 --- a/source/tests/test_pairwise_dprc.py +++ b/source/tests/test_pairwise_dprc.py @@ -349,7 +349,7 @@ def test_model_ener(self): t_aparam: np.reshape(np.tile(test_data["aparam"], 5), [-1]), is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) diff --git 
a/source/tests/test_polar_se_a.py b/source/tests/test_polar_se_a.py index 1933816488..2564dc0656 100644 --- a/source/tests/test_polar_se_a.py +++ b/source/tests/test_polar_se_a.py @@ -110,7 +110,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [p, gp] = sess.run([polar, gpolar], feed_dict=feed_dict_test) diff --git a/source/tests/test_polar_se_a_tebd.py b/source/tests/test_polar_se_a_tebd.py index 284cb46498..570c4261d9 100644 --- a/source/tests/test_polar_se_a_tebd.py +++ b/source/tests/test_polar_se_a_tebd.py @@ -128,7 +128,7 @@ def test_model(self): is_training: False, } - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) [p, gp] = sess.run([polar, gpolar], feed_dict=feed_dict_test) diff --git a/source/tests/test_prod_env_mat.py b/source/tests/test_prod_env_mat.py index cf0b9e9296..663b991831 100644 --- a/source/tests/test_prod_env_mat.py +++ b/source/tests/test_prod_env_mat.py @@ -11,7 +11,7 @@ class TestProdEnvMat(tf.test.TestCase): def setUp(self): - self.sess = self.test_session().__enter__() + self.sess = self.cached_session().__enter__() self.nframes = 2 self.dcoord = [ 12.83, diff --git a/source/tests/test_prod_force.py b/source/tests/test_prod_force.py index e0497d0b7e..83a44c0be9 100644 --- a/source/tests/test_prod_force.py +++ b/source/tests/test_prod_force.py @@ -18,7 +18,7 @@ def setUp(self): config.graph_options.rewrite_options.custom_optimizers.add().name = ( "dpparallel" ) - self.sess = self.test_session(config=config).__enter__() + self.sess = self.cached_session(config=config).__enter__() self.nframes = 2 self.dcoord = [ 12.83, diff --git a/source/tests/test_prod_force_grad.py b/source/tests/test_prod_force_grad.py index a7eaeb7511..012def217f 100644 --- a/source/tests/test_prod_force_grad.py +++ b/source/tests/test_prod_force_grad.py @@ -10,7 +10,7 @@ class TestProdForceGrad(tf.test.TestCase): def setUp(self): - self.sess = self.test_session().__enter__() + self.sess = self.cached_session().__enter__() self.nframes = 2 self.dcoord = [ 12.83, diff --git a/source/tests/test_prod_virial.py b/source/tests/test_prod_virial.py index 29f71daf68..2abcfcb1bf 100644 --- a/source/tests/test_prod_virial.py +++ b/source/tests/test_prod_virial.py @@ -10,7 +10,7 @@ class TestProdVirial(tf.test.TestCase): def setUp(self): - self.sess = self.test_session().__enter__() + self.sess = self.cached_session().__enter__() self.nframes = 2 self.dcoord = [ 12.83, diff --git a/source/tests/test_prod_virial_grad.py b/source/tests/test_prod_virial_grad.py index f7d6cfe92d..548b63a54b 100644 --- a/source/tests/test_prod_virial_grad.py +++ b/source/tests/test_prod_virial_grad.py @@ -10,7 +10,7 @@ class TestProdVirialGrad(tf.test.TestCase): def setUp(self): - self.sess = self.test_session().__enter__() + self.sess = self.cached_session().__enter__() self.nframes = 2 self.dcoord = [ 12.83, diff --git a/source/tests/test_tab_nonsmth.py b/source/tests/test_tab_nonsmth.py index d6df226478..9e3f9ff640 100644 --- a/source/tests/test_tab_nonsmth.py +++ b/source/tests/test_tab_nonsmth.py @@ -178,7 +178,7 @@ class TestTabNonSmooth(IntplInter, tf.test.TestCase): def setUp(self): self.places = 5 data = Data() - IntplInter.setUp(self, data, sess=self.test_session().__enter__()) + IntplInter.setUp(self, data, sess=self.cached_session().__enter__()) def test_force(self): force_test(self, self, places=5, 
suffix="_tab") diff --git a/source/tests/test_tab_smooth.py b/source/tests/test_tab_smooth.py index 220ba4e3f3..49b18e14f3 100644 --- a/source/tests/test_tab_smooth.py +++ b/source/tests/test_tab_smooth.py @@ -175,7 +175,7 @@ class TestTabSmooth(IntplInter, tf.test.TestCase): def setUp(self): self.places = 5 data = Data() - IntplInter.setUp(self, data, sess=self.test_session().__enter__()) + IntplInter.setUp(self, data, sess=self.cached_session().__enter__()) def test_force(self): force_test(self, self, places=5, suffix="_tab_smth") diff --git a/source/tests/test_type_embed.py b/source/tests/test_type_embed.py index 47de16cbdc..3e79bad70b 100644 --- a/source/tests/test_type_embed.py +++ b/source/tests/test_type_embed.py @@ -23,14 +23,14 @@ def test_embed_atom_type(self): ) expected_out = [[1, 2, 3], [1, 2, 3], [1, 2, 3], [7, 7, 7], [7, 7, 7]] atom_embed = embed_atom_type(ntypes, natoms, type_embedding) - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() atom_embed = sess.run(atom_embed) np.testing.assert_almost_equal(atom_embed, expected_out, 10) def test_type_embed_net(self): ten = TypeEmbedNet([2, 4, 8], seed=1, uniform_seed=True) type_embedding = ten.build(2) - sess = self.test_session().__enter__() + sess = self.cached_session().__enter__() sess.run(tf.global_variables_initializer()) type_embedding = sess.run(type_embedding) diff --git a/source/tests/test_type_one_side.py b/source/tests/test_type_one_side.py index e16ecd2b12..8e7c173912 100644 --- a/source/tests/test_type_one_side.py +++ b/source/tests/test_type_one_side.py @@ -125,7 +125,7 @@ def test_descriptor_one_side_exclude_types(self): feed_dict_test2[t_type] = np.reshape(new_type2[:numb_test, :], [-1]) feed_dict_test2[t_natoms] = new_natoms2 - with self.test_session() as sess: + with self.cached_session() as sess: sess.run(tf.global_variables_initializer()) [model_dout1] = sess.run([dout], feed_dict=feed_dict_test1) [model_dout2] = sess.run([dout], feed_dict=feed_dict_test2) @@ -231,7 +231,7 @@ def test_se_r_one_side_exclude_types(self): feed_dict_test2[t_type] = np.reshape(new_type2[:numb_test, :], [-1]) feed_dict_test2[t_natoms] = new_natoms2 - with self.test_session() as sess: + with self.cached_session() as sess: sess.run(tf.global_variables_initializer()) [model_dout1] = sess.run([dout], feed_dict=feed_dict_test1) [model_dout2] = sess.run([dout], feed_dict=feed_dict_test2) From 1a4a7ca4b65932e7952c0a7b0d26f7f6c815e52f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yifan=20Li=E6=9D=8E=E4=B8=80=E5=B8=86?= Date: Thu, 14 Sep 2023 22:55:50 -0500 Subject: [PATCH 13/18] lmp: let fparam_do_compute not execute by default (#2819) One should set the variable do_compute in pair_deepmd.cpp false by default, so that fparam can be used correctly. The current version will trigger the error https://github.com/deepmodeling/deepmd-kit/blob/7da9aaf075e8dea1eca9a08f99f3917235b55e3b/source/lmp/pair_deepmd.cpp#L1044-L1046 unexpectedly. 
--- source/lmp/pair_deepmd.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index ec53a1dc99..489c31ff19 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -382,6 +382,7 @@ PairDeepMD::PairDeepMD(LAMMPS *lmp) eps_v = 0.; scale = NULL; do_ttm = false; + do_compute = false; single_model = false; multi_models_mod_devi = false; multi_models_no_mod_devi = false; From 20a41d04bdbf6058d6af9a8ccf3070b4aff2f882 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 15 Sep 2023 00:19:56 -0400 Subject: [PATCH 14/18] speed up GitHub Actions (#2822) This PR speeds up multiple GitHub Actions in the following way: - only install `cuda-nvcc` and `cuda-cudart-dev` instead of the whole cudatoolkit - skip installing clang as it's already shipped with the GitHub Action image - enable cache for all `setup-python` - use Ninja instead of Make as the CMake generator --------- Signed-off-by: Jinzhe Zeng --- .github/workflows/build_cc.yml | 14 ++++++++++---- .github/workflows/build_wheel.yml | 1 + .github/workflows/test_cc.yml | 3 +++ source/install/build_cc.sh | 4 ++-- source/install/build_from_c.sh | 6 +++--- source/install/package_c.sh | 4 ++-- source/install/test_cc.sh | 4 ++-- source/install/test_cc_local.sh | 4 ++-- 8 files changed, 25 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build_cc.yml b/.github/workflows/build_cc.yml index 500b305ba9..55a5a5c4d8 100644 --- a/.github/workflows/build_cc.yml +++ b/.github/workflows/build_cc.yml @@ -26,14 +26,20 @@ jobs: - uses: actions/setup-python@v4 with: python-version: '3.11' + cache: 'pip' + - uses: lukka/get-cmake@latest - run: python -m pip install tensorflow - - run: sudo apt-get update && sudo apt-get install -y nvidia-cuda-toolkit + - run: | + wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.0-1_all.deb \ + && sudo dpkg -i cuda-keyring_1.0-1_all.deb \ + && sudo apt-get update \ + && sudo apt-get -y install cuda-cudart-dev-11-8 cuda-nvcc-11-8 if: matrix.variant == 'cuda' - run: | wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.0-1_all.deb \ && sudo dpkg -i cuda-keyring_1.0-1_all.deb \ && sudo apt-get update \ - && sudo apt-get -y install cuda-12-0 + && sudo apt-get -y install cuda-cudart-dev-12-0 cuda-nvcc-12-0 if: matrix.variant == 'cuda120' env: DEBIAN_FRONTEND: noninteractive @@ -44,12 +50,11 @@ jobs: && sudo apt-get update \ && sudo apt-get install -y rocm-dev hipcub-dev if: matrix.variant == 'rocm' - - run: sudo apt-get update && sudo apt-get install -y clang - if: matrix.variant == 'clang' - run: source/install/build_cc.sh env: DP_VARIANT: ${{ matrix.dp_variant }} DOWNLOAD_TENSORFLOW: "FALSE" + CMAKE_GENERATOR: Ninja if: matrix.variant != 'clang' - run: source/install/build_cc.sh env: @@ -57,6 +62,7 @@ jobs: DOWNLOAD_TENSORFLOW: "FALSE" CC: clang CXX: clang++ + CMAKE_GENERATOR: Ninja if: matrix.variant == 'clang' - name: Test files exist run: | diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml index 3e3a265159..85b2d6b884 100644 --- a/.github/workflows/build_wheel.yml +++ b/.github/workflows/build_wheel.yml @@ -67,6 +67,7 @@ jobs: name: Install Python with: python-version: '3.11' + cache: 'pip' - run: python -m pip install build - name: Build sdist run: python -m build --sdist diff --git a/.github/workflows/test_cc.yml b/.github/workflows/test_cc.yml index 9404f12937..fa37009730 100644 --- a/.github/workflows/test_cc.yml +++ 
b/.github/workflows/test_cc.yml @@ -11,10 +11,12 @@ jobs: - uses: actions/setup-python@v4 with: python-version: '3.11' + cache: 'pip' - name: Setup MPI uses: mpi4py/setup-mpi@v1 with: mpi: mpich + - uses: lukka/get-cmake@latest - run: python -m pip install tensorflow - run: source/install/test_cc_local.sh env: @@ -22,6 +24,7 @@ jobs: TF_INTRA_OP_PARALLELISM_THREADS: 1 TF_INTER_OP_PARALLELISM_THREADS: 1 LMP_CXX11_ABI_0: 1 + CMAKE_GENERATOR: Ninja # test lammps # ASE issue: https://gitlab.com/ase/ase/-/merge_requests/2843 # TODO: remove ase version when ase has new release diff --git a/source/install/build_cc.sh b/source/install/build_cc.sh index 7cb9ca38ad..bfa3cd1ce4 100755 --- a/source/install/build_cc.sh +++ b/source/install/build_cc.sh @@ -21,8 +21,8 @@ BUILD_TMP_DIR=${SCRIPT_PATH}/../build mkdir -p ${BUILD_TMP_DIR} cd ${BUILD_TMP_DIR} cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DUSE_TF_PYTHON_LIBS=TRUE ${CUDA_ARGS} -DLAMMPS_VERSION=stable_2Aug2023 .. -make -j${NPROC} -make install +cmake --build . -j${NPROC} +cmake --install . #------------------ echo "Congratulations! DeePMD-kit has been installed at ${INSTALL_PREFIX}" diff --git a/source/install/build_from_c.sh b/source/install/build_from_c.sh index b64a62eaff..3a48d3d46c 100755 --- a/source/install/build_from_c.sh +++ b/source/install/build_from_c.sh @@ -14,9 +14,9 @@ BUILD_TMP_DIR=${SCRIPT_PATH}/../build mkdir -p ${BUILD_TMP_DIR} cd ${BUILD_TMP_DIR} cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DDEEPMD_C_ROOT=${DEEPMD_C_ROOT} -DLAMMPS_VERSION=stable_2Aug2023 .. -make -j${NPROC} -make install -make lammps +cmake --build . -j${NPROC} +cmake --install . +cmake --build . --target=lammps #------------------ echo "Congratulations! DeePMD-kit has been installed at ${INSTALL_PREFIX}" diff --git a/source/install/package_c.sh b/source/install/package_c.sh index c250956e19..0c145b22af 100755 --- a/source/install/package_c.sh +++ b/source/install/package_c.sh @@ -20,8 +20,8 @@ cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \ -DPACKAGE_C=TRUE \ -DUSE_TF_PYTHON_LIBS=TRUE \ .. -make -j${NPROC} -make install +cmake --build . -j${NPROC} +cmake --install . #------------------ diff --git a/source/install/test_cc.sh b/source/install/test_cc.sh index 6da5962899..55fe03bad8 100755 --- a/source/install/test_cc.sh +++ b/source/install/test_cc.sh @@ -12,8 +12,8 @@ BUILD_TMP_DIR=${SCRIPT_PATH}/../build_tests mkdir -p ${BUILD_TMP_DIR} cd ${BUILD_TMP_DIR} cmake -DINSTALL_TENSORFLOW=TRUE -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DTENSORFLOW_ROOT=${INSTALL_PREFIX} -DBUILD_TESTING:BOOL=TRUE -DLAMMPS_VERSION=stable_2Aug2023 .. -make -j${NPROC} -make install +cmake --build . -j${NPROC} +cmake --install . #------------------ # go to a subdirectory... diff --git a/source/install/test_cc_local.sh b/source/install/test_cc_local.sh index 5a400a15c2..ec1bfadd69 100755 --- a/source/install/test_cc_local.sh +++ b/source/install/test_cc_local.sh @@ -13,8 +13,8 @@ BUILD_TMP_DIR=${SCRIPT_PATH}/../build_tests mkdir -p ${BUILD_TMP_DIR} cd ${BUILD_TMP_DIR} cmake -DINSTALL_TENSORFLOW=FALSE -DUSE_TF_PYTHON_LIBS=TRUE -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DBUILD_TESTING:BOOL=TRUE -DLAMMPS_VERSION=stable_2Aug2023 .. -make -j${NPROC} -make install +cmake --build . -j${NPROC} +cmake --install . #------------------ # go to a subdirectory... 
From 5591ed154e3fe1aa0ccb288f2e8312ec51817c27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yifan=20Li=E6=9D=8E=E4=B8=80=E5=B8=86?= Date: Fri, 15 Sep 2023 00:09:49 -0500 Subject: [PATCH 15/18] fix grammatical errors (#2796) Fix grammatical errors in the document. --- doc/development/type-embedding.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/development/type-embedding.md b/doc/development/type-embedding.md index a027ebdf26..5919d6c944 100644 --- a/doc/development/type-embedding.md +++ b/doc/development/type-embedding.md @@ -1,6 +1,6 @@ # Atom Type Embedding ## Overview -Here is an overview of the DeePMD-kit algorithm. Given a specific centric atom, we can obtain the matrix describing its local environment, named $\mathcal R$. It is consist of the distance between the centric atom and its neighbors, as well as a direction vector. We can embed each distance into a vector of $M_1$ dimension by an `embedding net`, so the environment matrix $\mathcal R$ can be embedded into matrix $\mathcal G$. We can thus extract a descriptor vector (of $M_1 \times M_2$ dim) of the centric atom from the $\mathcal G$ by some matrix multiplication, and put the descriptor into `fitting net` to get predicted energy $E$. The vanilla version of DeePMD-kit builds `embedding net` and `fitting net` relying on the atom type, resulting in $O(N)$ memory usage. After applying atom type embedding, in DeePMD-kit v2.0, we can share one `embedding net` and one `fitting net` in total, which decline training complexity largely. +Here is an overview of the DeePMD-kit algorithm. Given a specific centric atom, we can obtain the matrix describing its local environment, named $\mathcal R$. It consists of the distance between the centric atom and its neighbors, as well as a direction vector. We can embed each distance into a vector of $M_1$ dimension by an `embedding net`, so the environment matrix $\mathcal R$ can be embedded into matrix $\mathcal G$. We can thus extract a descriptor vector (of $M_1 \times M_2$ dim) of the centric atom from the $\mathcal G$ by some matrix multiplication, and put the descriptor into `fitting net` to get the predicted energy $E$. The vanilla version of DeePMD-kit builds `embedding net` and `fitting net` relying on the atom type, resulting in $O(N)$ memory usage. After applying atom type embedding, in DeePMD-kit v2.0, we can share one `embedding net` and one `fitting net` in total, which reduces training complexity largely. ## Preliminary In the following chart, you can find the meaning of symbols used to clarify the atom-type embedding algorithm. From 21db464245d950f6cacb83d46111e287833bfa32 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 15 Sep 2023 01:10:37 -0400 Subject: [PATCH 16/18] improve configurations of Python lint tools (#2823) 1. use `black-pre-commit-mirror` instead of `black` which is faster; 2. first ruff and then black; 3. remove `tool.ruff.target-version` which can be detected automatically; 4. add `RUF` and `NPY` rules to `tool.ruff.select`; 5. set `tool.ruff.pydocstyle.convention` to `numpy`, which can automatically add several rules to `ignore`. 
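As a minimal illustration of item 4 above, the `NPY` rules (e.g. NPY002) steer code away from the
legacy global-state `np.random` calls and toward an explicit `numpy.random.Generator`, which is the
same pattern this patch applies in `data/raw/shuffle_raw.py`. A small sketch, not part of the patch
itself:

```python
import numpy as np

# Legacy style flagged by NPY002:
#   np.random.seed(0)
#   np.random.shuffle(idx)
# Preferred style: create a local Generator and use it explicitly.
rng = np.random.default_rng(seed=0)
idx = np.arange(10)
rng.shuffle(idx)  # shuffles idx in place using the local Generator
```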
--------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 8 ++++---- data/raw/shuffle_raw.py | 3 ++- deepmd/calculator.py | 11 +++++++++-- deepmd/descriptor/se_a.py | 6 +++--- deepmd/descriptor/se_a_ebd.py | 2 +- deepmd/descriptor/se_a_mask.py | 2 +- deepmd/descriptor/se_atten.py | 4 ++-- deepmd/descriptor/se_r.py | 2 +- deepmd/descriptor/se_t.py | 2 +- deepmd/entrypoints/ipi.py | 2 +- deepmd/fit/dos.py | 8 ++++---- deepmd/infer/deep_tensor.py | 4 +++- deepmd/loss/ener.py | 4 ++-- deepmd/model/model_stat.py | 2 +- deepmd/nvnmd/entrypoints/wrap.py | 2 +- deepmd/train/run_options.py | 4 ++-- deepmd/train/trainer.py | 8 ++++---- deepmd/utils/argcheck.py | 15 +++++++++------ deepmd/utils/data_system.py | 4 +--- deepmd/utils/finetune.py | 6 ++---- deepmd/utils/multi_init.py | 4 ++-- deepmd/utils/network.py | 4 ++-- deepmd/utils/path.py | 2 +- deepmd/utils/spin.py | 7 ++++--- deepmd_cli/main.py | 2 +- pyproject.toml | 11 +++++------ source/install/build_tf.py | 2 +- source/tests/common.py | 4 +--- source/tests/test_argument_parser.py | 2 +- source/tests/test_descrpt_sea_ef_rot.py | 8 ++++---- source/tests/test_fitting_stat.py | 4 ++-- 31 files changed, 78 insertions(+), 71 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d39f5ec127..19c29c0322 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,10 +22,6 @@ repos: - id: check-symlinks - id: check-toml # Python -- repo: https://github.com/psf/black - rev: 23.9.1 - hooks: - - id: black-jupyter - repo: https://github.com/PyCQA/isort rev: 5.12.0 hooks: @@ -37,6 +33,10 @@ repos: hooks: - id: ruff args: ["--fix"] +- repo: https://github.com/psf/black-pre-commit-mirror + rev: 23.9.1 + hooks: + - id: black-jupyter # numpydoc - repo: https://github.com/Carreau/velin rev: 0.0.12 diff --git a/data/raw/shuffle_raw.py b/data/raw/shuffle_raw.py index f8c689e3f2..51bb7466c9 100755 --- a/data/raw/shuffle_raw.py +++ b/data/raw/shuffle_raw.py @@ -69,7 +69,8 @@ def _main(): tmp = np.reshape(tmp, [nframe, -1]) nframe = tmp.shape[0] idx = np.arange(nframe) - np.random.shuffle(idx) + rng = np.random.default_rng() + rng.shuffle(idx) for ii in raws: data = np.loadtxt(inpath + "/" + ii) diff --git a/deepmd/calculator.py b/deepmd/calculator.py index acef657e2c..8636ff30d2 100644 --- a/deepmd/calculator.py +++ b/deepmd/calculator.py @@ -6,6 +6,7 @@ ) from typing import ( TYPE_CHECKING, + ClassVar, Dict, List, Optional, @@ -69,13 +70,19 @@ class DP(Calculator): """ name = "DP" - implemented_properties = ["energy", "free_energy", "forces", "virial", "stress"] + implemented_properties: ClassVar[List[str]] = [ + "energy", + "free_energy", + "forces", + "virial", + "stress", + ] def __init__( self, model: Union[str, "Path"], label: str = "DP", - type_dict: Dict[str, int] = None, + type_dict: Optional[Dict[str, int]] = None, **kwargs, ) -> None: Calculator.__init__(self, label=label, **kwargs) diff --git a/deepmd/descriptor/se_a.py b/deepmd/descriptor/se_a.py index 82df8cc1a3..cceb72d4fb 100644 --- a/deepmd/descriptor/se_a.py +++ b/deepmd/descriptor/se_a.py @@ -144,7 +144,7 @@ class DescrptSeA(DescrptSe): .. [1] Linfeng Zhang, Jiequn Han, Han Wang, Wissam A. Saidi, Roberto Car, and E. Weinan. 2018. End-to-end symmetry preserving inter-atomic potential energy model for finite and extended systems. In Proceedings of the 32nd International Conference on Neural Information Processing - Systems (NIPS'18). 
Curran Associates Inc., Red Hook, NY, USA, 4441–4451. + Systems (NIPS'18). Curran Associates Inc., Red Hook, NY, USA, 4441-4451. """ def __init__( @@ -890,7 +890,7 @@ def _filter_lower( suffix="", ): """Input env matrix, returns R.G.""" - outputs_size = [1] + self.filter_neuron + outputs_size = [1, *self.filter_neuron] # cut-out inputs # with natom x (nei_type_i x 4) inputs_i = tf.slice(inputs, [0, start_index * 4], [-1, incrs_index * 4]) @@ -1006,7 +1006,7 @@ def _filter( nframes = tf.shape(tf.reshape(inputs, [-1, natoms[0], self.ndescrpt]))[0] # natom x (nei x 4) shape = inputs.get_shape().as_list() - outputs_size = [1] + self.filter_neuron + outputs_size = [1, *self.filter_neuron] outputs_size_2 = self.n_axis_neuron all_excluded = all( (type_input, type_i) in self.exclude_types for type_i in range(self.ntypes) diff --git a/deepmd/descriptor/se_a_ebd.py b/deepmd/descriptor/se_a_ebd.py index f46444169e..4816ec1569 100644 --- a/deepmd/descriptor/se_a_ebd.py +++ b/deepmd/descriptor/se_a_ebd.py @@ -230,7 +230,7 @@ def _embedding_net( # natom x (nei x 4) inputs = tf.reshape(inputs, [-1, self.ndescrpt]) shape = inputs.get_shape().as_list() - outputs_size = [1] + filter_neuron + outputs_size = [1, *filter_neuron] with tf.variable_scope(name, reuse=reuse): xyz_scatter_total = [] # with natom x (nei x 4) diff --git a/deepmd/descriptor/se_a_mask.py b/deepmd/descriptor/se_a_mask.py index cdec33e292..e4625922cc 100644 --- a/deepmd/descriptor/se_a_mask.py +++ b/deepmd/descriptor/se_a_mask.py @@ -112,7 +112,7 @@ class DescrptSeAMask(DescrptSeA): .. [1] Linfeng Zhang, Jiequn Han, Han Wang, Wissam A. Saidi, Roberto Car, and E. Weinan. 2018. End-to-end symmetry preserving inter-atomic potential energy model for finite and extended systems. In Proceedings of the 32nd International Conference on Neural Information Processing - Systems (NIPS'18). Curran Associates Inc., Red Hook, NY, USA, 4441–4451. + Systems (NIPS'18). Curran Associates Inc., Red Hook, NY, USA, 4441-4451. 
""" def __init__( diff --git a/deepmd/descriptor/se_atten.py b/deepmd/descriptor/se_atten.py index 12558c45c4..c962952ec0 100644 --- a/deepmd/descriptor/se_atten.py +++ b/deepmd/descriptor/se_atten.py @@ -1057,7 +1057,7 @@ def _filter_lower( reuse=None, ): """Input env matrix, returns R.G.""" - outputs_size = [1] + self.filter_neuron + outputs_size = [1, *self.filter_neuron] # cut-out inputs # with natom x (nei_type_i x 4) inputs_i = tf.slice(inputs, [0, start_index * 4], [-1, incrs_index * 4]) @@ -1260,7 +1260,7 @@ def _filter( nframes = tf.shape(tf.reshape(inputs, [-1, natoms[0], self.ndescrpt]))[0] # natom x (nei x 4) shape = inputs.get_shape().as_list() - outputs_size = [1] + self.filter_neuron + outputs_size = [1, *self.filter_neuron] outputs_size_2 = self.n_axis_neuron start_index = 0 diff --git a/deepmd/descriptor/se_r.py b/deepmd/descriptor/se_r.py index ad9fda2238..fbc54a651f 100644 --- a/deepmd/descriptor/se_r.py +++ b/deepmd/descriptor/se_r.py @@ -638,7 +638,7 @@ def _filter_r( trainable=True, ): # natom x nei - outputs_size = [1] + self.filter_neuron + outputs_size = [1, *self.filter_neuron] with tf.variable_scope(name, reuse=reuse): start_index = 0 xyz_scatter_total = [] diff --git a/deepmd/descriptor/se_t.py b/deepmd/descriptor/se_t.py index 34af8a90a2..671dbd4e15 100644 --- a/deepmd/descriptor/se_t.py +++ b/deepmd/descriptor/se_t.py @@ -633,7 +633,7 @@ def _filter( ): # natom x (nei x 4) shape = inputs.get_shape().as_list() - outputs_size = [1] + self.filter_neuron + outputs_size = [1, *self.filter_neuron] with tf.variable_scope(name, reuse=reuse): start_index_i = 0 result = None diff --git a/deepmd/entrypoints/ipi.py b/deepmd/entrypoints/ipi.py index b14b369e40..da287ff3de 100644 --- a/deepmd/entrypoints/ipi.py +++ b/deepmd/entrypoints/ipi.py @@ -24,7 +24,7 @@ def _program(name: str, args: List[str]): args : list of str list of arguments """ - return subprocess.call([os.path.join(ROOT_DIR, name)] + args, close_fds=False) + return subprocess.call([os.path.join(ROOT_DIR, name), *args], close_fds=False) def dp_ipi(): diff --git a/deepmd/fit/dos.py b/deepmd/fit/dos.py index 82018ea520..9a7cb734e5 100644 --- a/deepmd/fit/dos.py +++ b/deepmd/fit/dos.py @@ -98,8 +98,8 @@ def __init__( numb_aparam: int = 0, numb_dos: int = 300, rcond: Optional[float] = None, - trainable: List[bool] = None, - seed: int = None, + trainable: Optional[List[bool]] = None, + seed: Optional[int] = None, activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, @@ -380,8 +380,8 @@ def build( self, inputs: tf.Tensor, natoms: tf.Tensor, - input_dict: dict = None, - reuse: bool = None, + input_dict: Optional[dict] = None, + reuse: Optional[bool] = None, suffix: str = "", ) -> tf.Tensor: """Build the computational graph for fitting net. diff --git a/deepmd/infer/deep_tensor.py b/deepmd/infer/deep_tensor.py index 367a8ab5e7..268523e959 100644 --- a/deepmd/infer/deep_tensor.py +++ b/deepmd/infer/deep_tensor.py @@ -1,6 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( TYPE_CHECKING, + ClassVar, + Dict, List, Optional, Tuple, @@ -39,7 +41,7 @@ class DeepTensor(DeepEval): The input map for tf.import_graph_def. 
Only work with default tf graph """ - tensors = { + tensors: ClassVar[Dict[str, str]] = { # descriptor attrs "t_ntypes": "descrpt_attr/ntypes:0", "t_rcut": "descrpt_attr/rcut:0", diff --git a/deepmd/loss/ener.py b/deepmd/loss/ener.py index 7895fadbf3..95997bad10 100644 --- a/deepmd/loss/ener.py +++ b/deepmd/loss/ener.py @@ -388,9 +388,9 @@ def __init__( limit_pref_ae: float = 0.0, start_pref_pf: float = 0.0, limit_pref_pf: float = 0.0, - relative_f: float = None, + relative_f: Optional[float] = None, enable_atom_ener_coeff: bool = False, - use_spin: list = None, + use_spin: Optional[list] = None, ) -> None: self.starter_learning_rate = starter_learning_rate self.start_pref_e = start_pref_e diff --git a/deepmd/model/model_stat.py b/deepmd/model/model_stat.py index 08bc162632..d2cc918b64 100644 --- a/deepmd/model/model_stat.py +++ b/deepmd/model/model_stat.py @@ -58,7 +58,7 @@ def make_stat_input(data, nbatches, merge_sys=True): def merge_sys_stat(all_stat): - first_key = list(all_stat.keys())[0] + first_key = next(iter(all_stat.keys())) nsys = len(all_stat[first_key]) ret = defaultdict(list) for ii in range(nsys): diff --git a/deepmd/nvnmd/entrypoints/wrap.py b/deepmd/nvnmd/entrypoints/wrap.py index 896e1e0342..455dd999df 100644 --- a/deepmd/nvnmd/entrypoints/wrap.py +++ b/deepmd/nvnmd/entrypoints/wrap.py @@ -145,7 +145,7 @@ def wrap(self): nvnmd_cfg.save(nvnmd_cfg.config_file) head = self.wrap_head(nhs, nws) # output model - hs = [] + head + hs = [*head] for d in datas: hs.extend(d) diff --git a/deepmd/train/run_options.py b/deepmd/train/run_options.py index ad1774908b..451632949e 100644 --- a/deepmd/train/run_options.py +++ b/deepmd/train/run_options.py @@ -45,7 +45,7 @@ # http://patorjk.com/software/taag. Font:Big" -WELCOME = ( # noqa +WELCOME = ( r" _____ _____ __ __ _____ _ _ _ ", r"| __ \ | __ \ | \/ || __ \ | | (_)| | ", r"| | | | ___ ___ | |__) || \ / || | | | ______ | | __ _ | |_ ", @@ -71,7 +71,7 @@ f"build float prec: {global_float_prec}", f"build variant: {GLOBAL_CONFIG['dp_variant']}", f"build with tf inc: {GLOBAL_CONFIG['tf_include_dir']}", - f"build with tf lib: {GLOBAL_CONFIG['tf_libs'].replace(';', _sep)}", # noqa + f"build with tf lib: {GLOBAL_CONFIG['tf_libs'].replace(';', _sep)}", ) diff --git a/deepmd/train/trainer.py b/deepmd/train/trainer.py index b322336b39..1f7b78045b 100644 --- a/deepmd/train/trainer.py +++ b/deepmd/train/trainer.py @@ -250,7 +250,7 @@ def build(self, data=None, stop_batch=0, origin_type_map=None, suffix=""): if not self.multi_task_mode: single_data = data else: - single_data = data[list(data.keys())[0]] + single_data = data[next(iter(data.keys()))] if self.ntypes < single_data.get_ntypes(): raise ValueError( "The number of types of the training data is %d, but that of the " @@ -373,7 +373,7 @@ def _build_network(self, data, suffix=""): if not self.multi_task_mode: self._get_place_horders(data.get_data_dict()) else: - self._get_place_horders(data[list(data.keys())[0]].get_data_dict()) + self._get_place_horders(data[next(iter(data.keys()))].get_data_dict()) self.place_holders["type"] = tf.placeholder(tf.int32, [None], name="t_type") self.place_holders["natoms_vec"] = tf.placeholder( @@ -467,7 +467,7 @@ def _build_training(self): var_list=trainable_variables, name="train_step", ) - train_ops = [apply_op] + self._extra_train_ops + train_ops = [apply_op, *self._extra_train_ops] self.train_op = tf.group(*train_ops) else: self.train_op = {} @@ -479,7 +479,7 @@ def _build_training(self): var_list=trainable_variables, name=f"train_step_{fitting_key}", ) - 
train_ops = [apply_op] + self._extra_train_ops + train_ops = [apply_op, *self._extra_train_ops] self.train_op[fitting_key] = tf.group(*train_ops) log.info("built training") diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index 153824cb0d..f670feb578 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -177,7 +177,7 @@ def descrpt_se_a_args(): doc_axis_neuron = "Size of the submatrix of G (embedding matrix)." doc_activation_function = f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.' doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection' - doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters." + doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters." doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision." doc_trainable = "If the parameters in the embedding net is trainable" doc_seed = "Random seed for parameter initialization" @@ -263,7 +263,8 @@ def descrpt_se_a_tpe_args(): doc_type_nlayer = "number of hidden layers of type embedding net" doc_numb_aparam = "dimension of atomic parameter. if set to a value > 0, the atomic parameters are embedded." - return descrpt_se_a_args() + [ + return [ + *descrpt_se_a_args(), Argument("type_nchanl", int, optional=True, default=4, doc=doc_type_nchanl), Argument("type_nlayer", int, optional=True, default=2, doc=doc_type_nlayer), Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam), @@ -280,7 +281,7 @@ def descrpt_se_r_args(): doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." doc_activation_function = f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.' doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection' - doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters." + doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. 
Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters." doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision." doc_trainable = "If the parameters in the embedding net are trainable" doc_seed = "Random seed for parameter initialization" @@ -344,7 +345,7 @@ def descrpt_se_atten_common_args(): doc_axis_neuron = "Size of the submatrix of G (embedding matrix)." doc_activation_function = f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.' doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection' - doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters." + doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters." doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision." doc_trainable = "If the parameters in the embedding net is trainable" doc_seed = "Random seed for parameter initialization" @@ -397,7 +398,8 @@ def descrpt_se_atten_args(): doc_smooth_type_embdding = "When using stripped type embedding, whether to dot smooth factor on the network output of type embedding to keep the network smooth, instead of setting `set_davg_zero` to be True." doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used" - return descrpt_se_atten_common_args() + [ + return [ + *descrpt_se_atten_common_args(), Argument( "stripped_type_embedding", bool, @@ -422,7 +424,8 @@ def descrpt_se_atten_args(): def descrpt_se_atten_v2_args(): doc_set_davg_zero = "Set the normalization average to zero. 
This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used" - return descrpt_se_atten_common_args() + [ + return [ + *descrpt_se_atten_common_args(), Argument( "set_davg_zero", bool, optional=True, default=False, doc=doc_set_davg_zero ), diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py index 0bfe6b7c70..0071da755c 100644 --- a/deepmd/utils/data_system.py +++ b/deepmd/utils/data_system.py @@ -618,9 +618,7 @@ def _check_type_map_consistency(self, type_map_list): min_len = min([len(ii), len(ret)]) for idx in range(min_len): if ii[idx] != ret[idx]: - raise RuntimeError( - f"inconsistent type map: {str(ret)} {str(ii)}" - ) + raise RuntimeError(f"inconsistent type map: {ret!s} {ii!s}") if len(ii) > len(ret): ret = ii return ret diff --git a/deepmd/utils/finetune.py b/deepmd/utils/finetune.py index b641a6beca..4e597b1e05 100644 --- a/deepmd/utils/finetune.py +++ b/deepmd/utils/finetune.py @@ -56,7 +56,7 @@ def replace_model_params_with_pretrained_model( if i not in pretrained_type_map: out_line_type.append(i) assert not out_line_type, ( - f"{str(out_line_type)} type(s) not contained in the pretrained model! " + f"{out_line_type!s} type(s) not contained in the pretrained model! " "Please choose another suitable one." ) if cur_type_map != pretrained_type_map: @@ -103,9 +103,7 @@ def replace_model_params_with_pretrained_model( # keep some params that are irrelevant to model structures (need to discuss) TODO if "trainable" in cur_para.keys(): target_para["trainable"] = cur_para["trainable"] - log.info( - f"Change the '{config_key}' from {str(cur_para)} to {str(target_para)}." - ) + log.info(f"Change the '{config_key}' from {cur_para!s} to {target_para!s}.") jdata["model"][config_key] = target_para return jdata, cur_type_map diff --git a/deepmd/utils/multi_init.py b/deepmd/utils/multi_init.py index fd56f715c5..6c070dc67e 100644 --- a/deepmd/utils/multi_init.py +++ b/deepmd/utils/multi_init.py @@ -54,7 +54,7 @@ def replace_model_params_with_frz_multi_model( if i not in pretrained_type_map: out_line_type.append(i) assert not out_line_type, ( - f"{str(out_line_type)} type(s) not contained in the pretrained model! " + f"{out_line_type!s} type(s) not contained in the pretrained model! " "Please choose another suitable one." ) if cur_type_map != pretrained_type_map: @@ -169,5 +169,5 @@ def _change_sub_config(jdata: Dict[str, Any], src_jdata: Dict[str, Any], sub_key # keep some params that are irrelevant to model structures (need to discuss) TODO if "trainable" in cur_para.keys(): target_para["trainable"] = cur_para["trainable"] - log.info(f"Change the '{sub_key}' from {str(cur_para)} to {str(target_para)}.") + log.info(f"Change the '{sub_key}' from {cur_para!s} to {target_para!s}.") jdata[sub_key] = target_para diff --git a/deepmd/utils/network.py b/deepmd/utils/network.py index a2fd81b85c..36d8c42f82 100644 --- a/deepmd/utils/network.py +++ b/deepmd/utils/network.py @@ -183,11 +183,11 @@ def embedding_net( References ---------- .. [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identitymappings - in deep residual networks. InComputer Vision – ECCV 2016,pages 630–645. Springer + in deep residual networks. InComputer Vision - ECCV 2016,pages 630-645. Springer International Publishing, 2016. 
""" input_shape = xx.get_shape().as_list() - outputs_size = [input_shape[1]] + network_size + outputs_size = [input_shape[1], *network_size] for ii in range(1, len(outputs_size)): w_initializer = tf.random_normal_initializer( diff --git a/deepmd/utils/path.py b/deepmd/utils/path.py index 5206f44089..a8e4bc329f 100644 --- a/deepmd/utils/path.py +++ b/deepmd/utils/path.py @@ -114,7 +114,7 @@ def __str__(self) -> str: """Represent string.""" def __repr__(self) -> str: - return f"{type(self)} ({str(self)})" + return f"{type(self)} ({self!s})" def __eq__(self, other) -> bool: return str(self) == str(other) diff --git a/deepmd/utils/spin.py b/deepmd/utils/spin.py index c969a8062a..7820627649 100644 --- a/deepmd/utils/spin.py +++ b/deepmd/utils/spin.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( List, + Optional, ) from deepmd.env import ( @@ -24,9 +25,9 @@ class Spin: def __init__( self, - use_spin: List[bool] = None, - spin_norm: List[float] = None, - virtual_len: List[float] = None, + use_spin: Optional[List[bool]] = None, + spin_norm: Optional[List[float]] = None, + virtual_len: Optional[List[float]] = None, ) -> None: """Constructor.""" self.use_spin = use_spin diff --git a/deepmd_cli/main.py b/deepmd_cli/main.py index 94ceb9888d..fceca239ea 100644 --- a/deepmd_cli/main.py +++ b/deepmd_cli/main.py @@ -312,7 +312,7 @@ def main_parser() -> argparse.ArgumentParser: # The table is composed of fifth-order polynomial coefficients and is assembled # from two sub-tables. The first table takes the step(parameter) as it's uniform # step, while the second table takes 10 * step as it\s uniform step - #  The range of the first table is automatically detected by deepmd-kit, while the + # The range of the first table is automatically detected by deepmd-kit, while the # second table ranges from the first table's upper boundary(upper) to the # extrapolate(parameter) * upper. parser_compress = subparsers.add_parser( diff --git a/pyproject.toml b/pyproject.toml index b169a3b0eb..0ab9390efb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -166,24 +166,20 @@ profile = "black" force_grid_wrap = 1 [tool.ruff] -target-version = "py37" select = [ "E", # errors "F", # pyflakes "D", # pydocstyle "UP", # pyupgrade "C4", # flake8-comprehensions + "RUF", # ruff + "NPY", # numpy ] ignore = [ "E501", # line too long "F841", # local variable is assigned to but never used "E741", # ambiguous variable name "E402", # module level import not at top of file - "D413", # missing blank line after last section - "D416", # section name should end with a colon - "D203", # 1 blank line required before class docstring - "D107", # missing docstring in __init__ - "D213", # multi-line docstring summary should start at the second line "D100", # TODO: missing docstring in public module "D101", # TODO: missing docstring in public class "D102", # TODO: missing docstring in public method @@ -195,3 +191,6 @@ ignore = [ "D404", # TODO: first word of the docstring should not be This ] ignore-init-module-imports = true + +[tool.ruff.pydocstyle] +convention = "numpy" diff --git a/source/install/build_tf.py b/source/install/build_tf.py index 043c4c6c81..15847d2c21 100755 --- a/source/install/build_tf.py +++ b/source/install/build_tf.py @@ -151,7 +151,7 @@ def __call__(self): if not self.exists: raise RuntimeError( f"Download {self.filename} from {self.url} failed! " - f"You can manually download it to {str(self.path)} and " + f"You can manually download it to {self.path!s} and " "retry the script." 
) self.post_process() diff --git a/source/tests/common.py b/source/tests/common.py index e5dd1281f3..f8ed23df03 100644 --- a/source/tests/common.py +++ b/source/tests/common.py @@ -919,9 +919,7 @@ def check_type_map_consistency(self, type_map_list): min_len = min([len(ii), len(ret)]) for idx in range(min_len): if ii[idx] != ret[idx]: - raise RuntimeError( - f"inconsistent type map: {str(ret)} {str(ii)}" - ) + raise RuntimeError(f"inconsistent type map: {ret!s} {ii!s}") if len(ii) > len(ret): ret = ii return ret diff --git a/source/tests/test_argument_parser.py b/source/tests/test_argument_parser.py index 524499935c..bb8dd9ed62 100644 --- a/source/tests/test_argument_parser.py +++ b/source/tests/test_argument_parser.py @@ -184,7 +184,7 @@ def run_test(self, *, command: str, mapping: "TEST_DICT"): ) # test default values - cmd_args = [command] + required + cmd_args = [command, *required] buffer = StringIO() try: with redirect_stderr(buffer): diff --git a/source/tests/test_descrpt_sea_ef_rot.py b/source/tests/test_descrpt_sea_ef_rot.py index d94565af96..56cdb357b0 100644 --- a/source/tests/test_descrpt_sea_ef_rot.py +++ b/source/tests/test_descrpt_sea_ef_rot.py @@ -108,7 +108,7 @@ def make_test_data(self, nframes): one_type = [] for ii in range(2, 2 + self.ntypes): one_type = one_type + [ii - 2 for jj in range(self.natoms[ii])] - np.random.shuffle(one_type) + np.random.shuffle(one_type) # noqa: NPY002 one_type = np.array(one_type, dtype=int).reshape([1, -1]) dtype = np.tile(one_type, [nframes, 1]) defield = np.random.random(dcoord.shape) @@ -162,7 +162,7 @@ def test_rot_axis(self, suffix=""): ) self.sess.run(tf.global_variables_initializer()) - np.random.seed(0) + np.random.seed(0) # noqa: NPY002 # make test data nframes = 2 dcoord, dbox, dtype, defield = self.make_test_data(nframes) @@ -308,7 +308,7 @@ def test_rot_diff_axis(self, suffix=""): ) self.sess.run(tf.global_variables_initializer()) - np.random.seed(0) + np.random.seed(0) # noqa: NPY002 # make test data nframes = 2 dcoord, dbox, dtype, defield = self.make_test_data(nframes) @@ -423,7 +423,7 @@ def test_rot_field_corot(self, suffix=""): ) self.sess.run(tf.global_variables_initializer()) - np.random.seed(0) + np.random.seed(0) # noqa: NPY002 # make test data nframes = 2 dcoord, dbox, dtype, defield = self.make_test_data(nframes) diff --git a/source/tests/test_fitting_stat.py b/source/tests/test_fitting_stat.py index 045348440e..ad62c89f2a 100644 --- a/source/tests/test_fitting_stat.py +++ b/source/tests/test_fitting_stat.py @@ -28,12 +28,12 @@ def _make_fake_data(sys_natoms, sys_nframes, avgs, stds): tmp_data_a = [] for jj in range(ndof): tmp_data_f.append( - np.random.normal( + np.random.normal( # noqa: NPY002 loc=avgs[jj], scale=stds[jj], size=(sys_nframes[ii], 1) ) ) tmp_data_a.append( - np.random.normal( + np.random.normal( # noqa: NPY002 loc=avgs[jj], scale=stds[jj], size=(sys_nframes[ii], sys_natoms[ii]) ) ) From ab357f842a592f5484ed8075c22e588ca8ed622c Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 15 Sep 2023 01:21:16 -0400 Subject: [PATCH 17/18] add citation for fparam (#2821) It seems missing... --- CITATIONS.bib | 13 +++++++++++++ doc/credits.rst | 7 +++++++ 2 files changed, 20 insertions(+) diff --git a/CITATIONS.bib b/CITATIONS.bib index 09f0f14acb..930b4fc2a5 100644 --- a/CITATIONS.bib +++ b/CITATIONS.bib @@ -105,6 +105,19 @@ @misc{Zhang_2022_DPA1 doi = {10.48550/arXiv.2208.08236}, } +@article{Zhang_PhysPlasmas_2020_v27_p122704, + annote = {frame-specific parameters (e.g. 
electronic temperature)}, + author = {Zhang, Yuzhi and Gao, Chang and Liu, Qianrui and Zhang, Linfeng and Wang, Han and Chen, Mohan}, + title = {{Warm dense matter simulation via electron temperature dependent deep potential molecular dynamics}}, + journal = {Phys. Plasmas}, + volume = {27}, + number = {12}, + pages = {122704}, + year = {2020}, + month = {12}, + doi = {10.1063/5.0023265}, +} + @article{Zhang_PhysRevB_2020_v102_p41121, annote = {fit dipole}, title={{Deep neural network for the dielectric response of insulators}}, diff --git a/doc/credits.rst b/doc/credits.rst index fad06e63ba..3612b8ace8 100644 --- a/doc/credits.rst +++ b/doc/credits.rst @@ -49,6 +49,13 @@ Cite DeePMD-kit and methods Zhang_2022_DPA1 +- If frame-specific parameters (`fparam`, e.g. electronic temperature) is used, + +.. bibliography:: + :filter: False + + Zhang_PhysPlasmas_2020_v27_p122704 + - If fitting dipole, .. bibliography:: From 0d5737f658592ef11dea091beaabb9524c74fb63 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 15 Sep 2023 01:59:54 -0400 Subject: [PATCH 18/18] make pairwise_dprc model work with MPI (#2818) - make `aparam` accepts `nall` instead of `nloc`. A variable `fitting_attr/aparam_nall` (dtype=bool) controls the behavior. - enable this behavior for se_a_mask, by the way - fix the shape of atomic energy, which is `nloc` instead of `nall` - set the minimal `nloc` to 1, as when nloc=0, many OPs (such as prod_force) throw floating-point exception - fix backward map when the shape of `nloc` is padded --------- Signed-off-by: Jinzhe Zeng --- deepmd/descriptor/se_a_mask.py | 4 +- deepmd/entrypoints/freeze.py | 1 + deepmd/model/pairwise_dprc.py | 7 +- source/api_c/include/c_api.h | 23 + source/api_c/include/c_api_internal.h | 2 + source/api_c/include/deepmd.hpp | 32 +- source/api_c/src/c_api.cc | 17 +- .../tests/test_deeppot_a_fparam_aparam.cc | 54 + source/api_cc/include/DeepPot.h | 21 + source/api_cc/include/common.h | 18 +- source/api_cc/src/DeepPot.cc | 113 +- source/api_cc/src/common.cc | 83 +- source/lib/src/pairwise.cc | 2 +- source/op/pairwise.cc | 10 +- source/tests/infer/pairwise_dprc.pbtxt | 44536 ++++++++++++++++ source/tests/test_pairwise_dprc.py | 181 + 16 files changed, 45014 insertions(+), 90 deletions(-) create mode 100644 source/tests/infer/pairwise_dprc.pbtxt diff --git a/deepmd/descriptor/se_a_mask.py b/deepmd/descriptor/se_a_mask.py index e4625922cc..b9181fd6b0 100644 --- a/deepmd/descriptor/se_a_mask.py +++ b/deepmd/descriptor/se_a_mask.py @@ -301,10 +301,12 @@ def build( dstd = self.dstd """ - ``aparam'' shape is [nframes, natoms] + ``aparam'' shape is [nframes, nall] aparam[:, :] is the real/virtual sign for each atom. 
""" aparam = input_dict["aparam"] + with tf.variable_scope("fitting_attr" + suffix, reuse=reuse): + t_aparam_nall = tf.constant(True, name="aparam_nall", dtype=tf.bool) self.mask = tf.cast(aparam, tf.int32) self.mask = tf.reshape(self.mask, [-1, natoms[1]]) diff --git a/deepmd/entrypoints/freeze.py b/deepmd/entrypoints/freeze.py index c39dd4ad61..11e0d55645 100755 --- a/deepmd/entrypoints/freeze.py +++ b/deepmd/entrypoints/freeze.py @@ -224,6 +224,7 @@ def _make_node_names( "spin_attr/ntypes_spin", "fitting_attr/dfparam", "fitting_attr/daparam", + "fitting_attr/aparam_nall", ] elif model_type == "dos": nodes += [ diff --git a/deepmd/model/pairwise_dprc.py b/deepmd/model/pairwise_dprc.py index bf158434b0..a9e154096a 100644 --- a/deepmd/model/pairwise_dprc.py +++ b/deepmd/model/pairwise_dprc.py @@ -125,6 +125,7 @@ def build( with tf.variable_scope("fitting_attr" + suffix, reuse=reuse): t_dfparam = tf.constant(0, name="dfparam", dtype=tf.int32) t_daparam = tf.constant(1, name="daparam", dtype=tf.int32) + t_aparam_nall = tf.constant(True, name="aparam_nall", dtype=tf.bool) with tf.variable_scope("descrpt_attr" + suffix, reuse=reuse): t_ntypes = tf.constant(self.ntypes, name="ntypes", dtype=tf.int32) t_rcut = tf.constant( @@ -222,12 +223,14 @@ def build( virial = virial_qm + virial_qmmm virial = tf.identity(virial, name="o_virial" + suffix) + backward_qm_map_nloc = tf.slice(backward_qm_map, [0, 0], [-1, natoms[0]]) + backward_qmmm_map_nloc = tf.slice(backward_qmmm_map, [0, 0], [-1, natoms[0]]) atom_ener_qm = gather_placeholder( - qm_dict["atom_ener"], backward_qm_map, placeholder=0.0 + qm_dict["atom_ener"], backward_qm_map_nloc, placeholder=0.0 ) atom_ener_qmmm = tf.math.segment_sum( gather_placeholder( - qmmm_dict["atom_ener"], backward_qmmm_map, placeholder=0.0 + qmmm_dict["atom_ener"], backward_qmmm_map_nloc, placeholder=0.0 ), qmmm_frame_idx, ) diff --git a/source/api_c/include/c_api.h b/source/api_c/include/c_api.h index 6aa1268123..b0c030962a 100644 --- a/source/api_c/include/c_api.h +++ b/source/api_c/include/c_api.h @@ -2,6 +2,9 @@ #pragma once #ifdef __cplusplus extern "C" { +#else +// for C99 +#include #endif /** @@ -717,6 +720,16 @@ int DP_DeepPotGetDimFParam(DP_DeepPot* dp); */ int DP_DeepPotGetDimAParam(DP_DeepPot* dp); +/** + * @brief Check whether the atomic dimension of atomic parameters is nall + * instead of nloc. + * + * @param[in] dp The DP to use. + * @return true the atomic dimension of atomic parameters is nall + * @return false the atomic dimension of atomic parameters is nloc + */ +bool DP_DeepPotIsAParamNAll(DP_DeepPot* dp); + /** * @brief Get the type map of a DP. * @param[in] dp The DP to use. @@ -737,6 +750,16 @@ int DP_DeepPotModelDeviGetDimFParam(DP_DeepPotModelDevi* dp); */ int DP_DeepPotModelDeviGetDimAParam(DP_DeepPotModelDevi* dp); +/** + * @brief Check whether the atomic dimension of atomic parameters is nall + * instead of nloc. + * + * @param[in] dp The DP Model Deviation to use. + * @return true the atomic dimension of atomic parameters is nall + * @return false the atomic dimension of atomic parameters is nloc + */ +bool DP_DeepPotModelDeviIsAParamNAll(DP_DeepPotModelDevi* dp); + /** * @brief The deep tensor. 
**/ diff --git a/source/api_c/include/c_api_internal.h b/source/api_c/include/c_api_internal.h index 44bce2c696..85e1d2f421 100644 --- a/source/api_c/include/c_api_internal.h +++ b/source/api_c/include/c_api_internal.h @@ -41,6 +41,7 @@ struct DP_DeepPot { std::string exception; int dfparam; int daparam; + bool aparam_nall; }; struct DP_DeepPotModelDevi { @@ -51,6 +52,7 @@ struct DP_DeepPotModelDevi { std::string exception; int dfparam; int daparam; + bool aparam_nall; }; struct DP_DeepTensor { diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 532e01e805..71ff5b3dcc 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -597,6 +597,7 @@ class DeepPot { DP_CHECK_OK(DP_DeepPotCheckOK, dp); dfparam = DP_DeepPotGetDimFParam(dp); daparam = DP_DeepPotGetDimAParam(dp); + aparam_nall = DP_DeepPotIsAParamNAll(dp); }; /** @@ -771,9 +772,12 @@ class DeepPot { VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, natoms - nghost, fparam, aparam); + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, (natoms - nghost) * daparam, aparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; @@ -842,9 +846,12 @@ class DeepPot { VALUETYPE *atomic_ener_ = &atom_energy[0]; VALUETYPE *atomic_virial_ = &atom_virial[0]; std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, natoms - nghost, fparam, aparam); + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, (natoms - nghost) * daparam, aparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; @@ -1039,6 +1046,7 @@ class DeepPot { DP_DeepPot *dp; int dfparam; int daparam; + bool aparam_nall; template void validate_fparam_aparam(const int &nframes, const int &nloc, @@ -1128,6 +1136,7 @@ class DeepPotModelDevi { numb_models = models.size(); dfparam = DP_DeepPotModelDeviGetDimFParam(dp); daparam = DP_DeepPotModelDeviGetDimAParam(dp); + aparam_nall = DP_DeepPotModelDeviIsAParamNAll(dp); }; /** @@ -1173,9 +1182,12 @@ class DeepPotModelDevi { VALUETYPE *force_ = &force_flat[0]; VALUETYPE *virial_ = &virial_flat[0]; std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, natoms - nghost, fparam, aparam); + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, (natoms - nghost) * daparam, aparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? 
&aparam_[0] : nullptr; @@ -1250,9 +1262,12 @@ class DeepPotModelDevi { VALUETYPE *atomic_ener_ = &atom_energy_flat[0]; VALUETYPE *atomic_virial_ = &atom_virial_flat[0]; std::vector fparam_, aparam_; - validate_fparam_aparam(nframes, natoms - nghost, fparam, aparam); + validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)), + fparam, aparam); tile_fparam_aparam(fparam_, nframes, dfparam, fparam); - tile_fparam_aparam(aparam_, nframes, (natoms - nghost) * daparam, aparam); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? natoms : (natoms - nghost)) * daparam, + aparam); const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr; const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr; @@ -1448,6 +1463,7 @@ class DeepPotModelDevi { int numb_models; int dfparam; int daparam; + bool aparam_nall; template void validate_fparam_aparam(const int &nframes, const int &nloc, diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 1e2ee47b8b..9d1ed7d323 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -29,6 +29,7 @@ DP_DeepPot::DP_DeepPot() {} DP_DeepPot::DP_DeepPot(deepmd::DeepPot& dp) : dp(dp) { dfparam = dp.dim_fparam(); daparam = dp.dim_aparam(); + aparam_nall = dp.is_aparam_nall(); } DP_DeepPot* DP_NewDeepPot(const char* c_model) { @@ -65,6 +66,7 @@ DP_DeepPotModelDevi::DP_DeepPotModelDevi(deepmd::DeepPotModelDevi& dp) : dp(dp) { dfparam = dp.dim_fparam(); daparam = dp.dim_aparam(); + aparam_nall = dp.is_aparam_nall(); } DP_DeepPotModelDevi* DP_NewDeepPotModelDevi(const char** c_models, @@ -249,7 +251,10 @@ inline void DP_DeepPotComputeNList_variant(DP_DeepPot* dp, } std::vector aparam_; if (aparam) { - aparam_.assign(aparam, aparam + nframes * (natoms - nghost) * dp->daparam); + aparam_.assign(aparam, + aparam + nframes * + (dp->aparam_nall ? natoms : (natoms - nghost)) * + dp->daparam); } std::vector e; std::vector f, v, ae, av; @@ -433,7 +438,9 @@ void DP_DeepPotModelDeviComputeNList_variant(DP_DeepPotModelDevi* dp, } std::vector aparam_; if (aparam) { - aparam_.assign(aparam, aparam + (natoms - nghost) * dp->daparam); + aparam_.assign( + aparam, + aparam + (dp->aparam_nall ? 
natoms : (natoms - nghost)) * dp->daparam); } // different from DeepPot std::vector e; @@ -1031,6 +1038,8 @@ int DP_DeepPotGetDimFParam(DP_DeepPot* dp) { return dp->dfparam; } int DP_DeepPotGetDimAParam(DP_DeepPot* dp) { return dp->daparam; } +bool DP_DeepPotIsAParamNAll(DP_DeepPot* dp) { return dp->aparam_nall; } + const char* DP_DeepPotCheckOK(DP_DeepPot* dp) { return string_to_char(dp->exception); } @@ -1133,6 +1142,10 @@ int DP_DeepPotModelDeviGetDimAParam(DP_DeepPotModelDevi* dp) { return dp->daparam; } +bool DP_DeepPotModelDeviIsAParamNAll(DP_DeepPotModelDevi* dp) { + return dp->aparam_nall; +} + const char* DP_DeepPotModelDeviCheckOK(DP_DeepPotModelDevi* dp) { return string_to_char(dp->exception); } diff --git a/source/api_c/tests/test_deeppot_a_fparam_aparam.cc b/source/api_c/tests/test_deeppot_a_fparam_aparam.cc index f4cdc42e72..a728ede22d 100644 --- a/source/api_c/tests/test_deeppot_a_fparam_aparam.cc +++ b/source/api_c/tests/test_deeppot_a_fparam_aparam.cc @@ -380,3 +380,57 @@ TYPED_TEST(TestInferDeepPotAFParamAParam, cpu_lmp_nlist_2rc) { EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON); } } + +template +class TestInferAParamNAll : public ::testing::Test { + protected: + std::vector coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74, + 00.25, 3.32, 1.68, 3.36, 3.00, 1.81, + 3.51, 2.51, 2.60, 4.27, 3.22, 1.56}; + std::vector atype = {0, 0, 0, 0, 0, 0}; + std::vector box = {13., 0., 0., 0., 13., 0., 0., 0., 13.}; + int natoms = 6; + + deepmd::hpp::DeepPot dp; + + void SetUp() override { + std::string file_name = "../../tests/infer/pairwise_dprc.pbtxt"; + deepmd::hpp::convert_pbtxt_to_pb(file_name, "pairwise_dprc.pb"); + dp.init("pairwise_dprc.pb"); + }; + + void TearDown() override { remove("fparam_aparam.pb"); }; +}; + +TYPED_TEST_SUITE(TestInferAParamNAll, ValueTypes); + +TYPED_TEST(TestInferAParamNAll, cpu_lmp_nlist) { + using VALUETYPE = TypeParam; + std::vector& coord = this->coord; + std::vector& atype = this->atype; + std::vector& box = this->box; + int& natoms = this->natoms; + deepmd::hpp::DeepPot& dp = this->dp; + float rc = dp.cutoff(); + int nloc = coord.size() / 3; + std::vector coord_cpy; + std::vector atype_cpy, mapping; + std::vector > nlist_data; + _build_nlist(nlist_data, coord_cpy, atype_cpy, mapping, coord, + atype, box, rc); + int nall = coord_cpy.size() / 3; + // nall aparam + std::vector aparam_cpy(nall, 0); + // for some reason all QM atoms do not work + aparam_cpy[0] = 1; + std::vector ilist(nloc), numneigh(nloc); + std::vector firstneigh(nloc); + deepmd::hpp::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]); + convert_nlist(inlist, nlist_data); + + double ener; + std::vector force_, virial; + dp.compute(ener, force_, virial, coord_cpy, atype_cpy, box, nall - nloc, + inlist, 0, std::vector(), aparam_cpy); + // just check if the interface accepts nall aparam; no interest with results +} diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index 4db012ea4f..7c4a0afe10 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -291,6 +291,16 @@ class DeepPot { **/ void get_type_map(std::string& type_map); + /** + * @brief Get whether the atom dimension of aparam is nall instead of fparam. + * @param[out] aparam_nall whether the atom dimension of aparam is nall + *instead of fparam. 
+ **/ + bool is_aparam_nall() const { + assert(inited); + return aparam_nall; + }; + private: tensorflow::Session* session; int num_intra_nthreads, num_inter_nthreads; @@ -309,6 +319,7 @@ class DeepPot { int ntypes_spin; int dfparam; int daparam; + bool aparam_nall; /** * @brief Validate the size of frame and atomic parameters. * @param[in] nframes The number of frames. @@ -572,6 +583,15 @@ class DeepPotModelDevi { void compute_relative_std_f(std::vector& std, const std::vector& avg, const VALUETYPE eps); + /** + * @brief Get whether the atom dimension of aparam is nall instead of fparam. + * @param[out] aparam_nall whether the atom dimension of aparam is nall + *instead of fparam. + **/ + bool is_aparam_nall() const { + assert(inited); + return aparam_nall; + }; private: unsigned numb_models; @@ -592,6 +612,7 @@ class DeepPotModelDevi { int ntypes_spin; int dfparam; int daparam; + bool aparam_nall; template void validate_fparam_aparam(const int& nloc, const std::vector& fparam, diff --git a/source/api_cc/include/common.h b/source/api_cc/include/common.h index 2bcb3cc77f..481e09cc89 100644 --- a/source/api_cc/include/common.h +++ b/source/api_cc/include/common.h @@ -90,7 +90,8 @@ void select_real_atoms_coord(std::vector& dcoord, const int& ntypes, const int& nframes, const int& daparam, - const int& nall); + const int& nall, + const bool aparam_nall = false); /** * @brief Apply the given map to a vector. @@ -228,6 +229,8 @@ int session_get_dtype(tensorflow::Session* session, * @param[in] aparam_ Atom parameters. * @param[in] atommap Atom map. * @param[in] scope The scope of the tensors. + * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is + * nall. */ template int session_input_tensors( @@ -240,7 +243,8 @@ int session_input_tensors( const std::vector& fparam_, const std::vector& aparam_, const deepmd::AtomMap& atommap, - const std::string scope = ""); + const std::string scope = "", + const bool aparam_nall = false); /** * @brief Get input tensors. @@ -255,6 +259,8 @@ int session_input_tensors( * @param[in] nghost Number of ghost atoms. * @param[in] ago Update the internal neighbour list if ago is 0. * @param[in] scope The scope of the tensors. + * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is + * nall. */ template int session_input_tensors( @@ -269,7 +275,8 @@ int session_input_tensors( const deepmd::AtomMap& atommap, const int nghost, const int ago, - const std::string scope = ""); + const std::string scope = "", + const bool aparam_nall = false); /** * @brief Get input tensors for mixed type. @@ -285,6 +292,8 @@ int session_input_tensors( * @param[in] nghost Number of ghost atoms. * @param[in] ago Update the internal neighbour list if ago is 0. * @param[in] scope The scope of the tensors. + * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is + * nall. */ template int session_input_tensors_mixed_type( @@ -298,7 +307,8 @@ int session_input_tensors_mixed_type( const std::vector& fparam_, const std::vector& aparam_, const deepmd::AtomMap& atommap, - const std::string scope = ""); + const std::string scope = "", + const bool aparam_nall = false); /** * @brief Read model file to a string. 
diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc index e20989eb9d..785ed00cb8 100644 --- a/source/api_cc/src/DeepPot.cc +++ b/source/api_cc/src/DeepPot.cc @@ -479,6 +479,15 @@ void DeepPot::init(const std::string& model, if (daparam < 0) { daparam = 0; } + if (daparam > 0) { + try { + aparam_nall = get_scalar("fitting_attr/aparam_nall"); + } catch (deepmd::deepmd_exception) { + aparam_nall = false; + } + } else { + aparam_nall = false; + } model_type = get_scalar("model_attr/model_type"); inited = true; @@ -571,23 +580,25 @@ void DeepPot::compute(ENERGYVTYPE& dener, assert(nloc == atommap.get_type().size()); std::vector fparam; std::vector aparam; - validate_fparam_aparam(nframes, nloc, fparam_, aparam_); + validate_fparam_aparam(nframes, (aparam_nall ? nall : nloc), fparam_, + aparam_); tile_fparam_aparam(fparam, nframes, dfparam, fparam_); - tile_fparam_aparam(aparam, nframes, nloc * daparam, aparam_); + tile_fparam_aparam(aparam, nframes, (aparam_nall ? nall : nloc) * daparam, + aparam_); std::vector> input_tensors; if (dtype == tensorflow::DT_DOUBLE) { - int ret = - session_input_tensors(input_tensors, dcoord_, ntypes, datype_, - dbox, cell_size, fparam, aparam, atommap); + int ret = session_input_tensors(input_tensors, dcoord_, ntypes, + datype_, dbox, cell_size, fparam, + aparam, atommap, "", aparam_nall); assert(ret == nloc); run_model(dener, dforce_, dvirial, session, input_tensors, atommap, nframes); } else { - int ret = - session_input_tensors(input_tensors, dcoord_, ntypes, datype_, - dbox, cell_size, fparam, aparam, atommap); + int ret = session_input_tensors(input_tensors, dcoord_, ntypes, + datype_, dbox, cell_size, fparam, + aparam, atommap, "", aparam_nall); assert(ret == nloc); run_model(dener, dforce_, dvirial, session, input_tensors, atommap, nframes); @@ -650,9 +661,12 @@ void DeepPot::compute(ENERGYVTYPE& dener, int nframes = dcoord_.size() / nall / 3; std::vector fparam; std::vector aparam_; - validate_fparam_aparam(nframes, nall - nghost, fparam_, aparam__); + validate_fparam_aparam(nframes, (aparam_nall ? nall : (nall - nghost)), + fparam_, aparam__); tile_fparam_aparam(fparam, nframes, dfparam, fparam_); - tile_fparam_aparam(aparam_, nframes, (nall - nghost) * daparam, aparam__); + tile_fparam_aparam(aparam_, nframes, + (aparam_nall ? 
nall : (nall - nghost)) * daparam, + aparam__); // select real atoms std::vector dcoord, dforce, aparam; @@ -660,7 +674,7 @@ void DeepPot::compute(ENERGYVTYPE& dener, int nghost_real, nall_real, nloc_real; select_real_atoms_coord(dcoord, datype, aparam, nghost_real, fwd_map, bkw_map, nall_real, nloc_real, dcoord_, datype_, aparam_, - nghost, ntypes, nframes, daparam, nall); + nghost, ntypes, nframes, daparam, nall, aparam_nall); // internal nlist if (ago == 0) { @@ -752,16 +766,16 @@ void DeepPot::compute_inner(ENERGYVTYPE& dener, nlist_data.make_inlist(nlist); } if (dtype == tensorflow::DT_DOUBLE) { - int ret = session_input_tensors(input_tensors, dcoord_, ntypes, - datype_, dbox, nlist, fparam, - aparam, atommap, nghost, ago); + int ret = session_input_tensors( + input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, aparam, + atommap, nghost, ago, "", aparam_nall); assert(nloc == ret); run_model(dener, dforce_, dvirial, session, input_tensors, atommap, nframes, nghost); } else { - int ret = session_input_tensors(input_tensors, dcoord_, ntypes, - datype_, dbox, nlist, fparam, aparam, - atommap, nghost, ago); + int ret = session_input_tensors( + input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, aparam, + atommap, nghost, ago, "", aparam_nall); assert(nloc == ret); run_model(dener, dforce_, dvirial, session, input_tensors, atommap, nframes, nghost); @@ -839,15 +853,15 @@ void DeepPot::compute(ENERGYVTYPE& dener, std::vector> input_tensors; if (dtype == tensorflow::DT_DOUBLE) { - int nloc = - session_input_tensors(input_tensors, dcoord_, ntypes, datype_, - dbox, cell_size, fparam, aparam, atommap); + int nloc = session_input_tensors(input_tensors, dcoord_, ntypes, + datype_, dbox, cell_size, fparam, + aparam, atommap, "", aparam_nall); run_model(dener, dforce_, dvirial, datom_energy_, datom_virial_, session, input_tensors, atommap, nframes); } else { - int nloc = - session_input_tensors(input_tensors, dcoord_, ntypes, datype_, - dbox, cell_size, fparam, aparam, atommap); + int nloc = session_input_tensors(input_tensors, dcoord_, ntypes, + datype_, dbox, cell_size, fparam, + aparam, atommap, "", aparam_nall); run_model(dener, dforce_, dvirial, datom_energy_, datom_virial_, session, input_tensors, atommap, nframes); } @@ -920,9 +934,11 @@ void DeepPot::compute(ENERGYVTYPE& dener, int nloc = nall - nghost; std::vector fparam; std::vector aparam_; - validate_fparam_aparam(nframes, nloc, fparam_, aparam__); + validate_fparam_aparam(nframes, (aparam_nall ? nall : nloc), fparam_, + aparam__); tile_fparam_aparam(fparam, nframes, dfparam, fparam_); - tile_fparam_aparam(aparam_, nframes, nloc * daparam, aparam__); + tile_fparam_aparam(aparam_, nframes, (aparam_nall ? 
nall : nloc) * daparam, + aparam__); std::vector> input_tensors; // select real atoms std::vector dcoord, dforce, aparam, datom_energy, datom_virial; @@ -930,7 +946,7 @@ void DeepPot::compute(ENERGYVTYPE& dener, int nghost_real, nall_real, nloc_real; select_real_atoms_coord(dcoord, datype, aparam, nghost_real, fwd_map, bkw_map, nall_real, nloc_real, dcoord_, datype_, aparam_, - nghost, ntypes, nframes, daparam, nall); + nghost, ntypes, nframes, daparam, nall, aparam_nall); if (ago == 0) { atommap = deepmd::AtomMap(datype.begin(), datype.begin() + nloc_real); @@ -943,16 +959,16 @@ void DeepPot::compute(ENERGYVTYPE& dener, } if (dtype == tensorflow::DT_DOUBLE) { - int ret = session_input_tensors(input_tensors, dcoord, ntypes, - datype, dbox, nlist, fparam, aparam, - atommap, nghost_real, ago); + int ret = session_input_tensors( + input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, + atommap, nghost_real, ago, "", aparam_nall); assert(nloc_real == ret); run_model(dener, dforce, dvirial, datom_energy, datom_virial, session, input_tensors, atommap, nframes, nghost_real); } else { - int ret = session_input_tensors(input_tensors, dcoord, ntypes, - datype, dbox, nlist, fparam, aparam, - atommap, nghost_real, ago); + int ret = session_input_tensors( + input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, + atommap, nghost_real, ago, "", aparam_nall); assert(nloc_real == ret); run_model(dener, dforce, dvirial, datom_energy, datom_virial, session, input_tensors, atommap, nframes, nghost_real); @@ -1055,14 +1071,14 @@ void DeepPot::compute_mixed_type(ENERGYVTYPE& dener, if (dtype == tensorflow::DT_DOUBLE) { int ret = session_input_tensors_mixed_type( input_tensors, nframes, dcoord_, ntypes, datype_, dbox, cell_size, - fparam, aparam, atommap); + fparam, aparam, atommap, "", aparam_nall); assert(ret == nloc); run_model(dener, dforce_, dvirial, session, input_tensors, atommap, nframes); } else { int ret = session_input_tensors_mixed_type( input_tensors, nframes, dcoord_, ntypes, datype_, dbox, cell_size, - fparam, aparam, atommap); + fparam, aparam, atommap, "", aparam_nall); assert(ret == nloc); run_model(dener, dforce_, dvirial, session, input_tensors, atommap, nframes); @@ -1139,13 +1155,13 @@ void DeepPot::compute_mixed_type(ENERGYVTYPE& dener, if (dtype == tensorflow::DT_DOUBLE) { int nloc = session_input_tensors_mixed_type( input_tensors, nframes, dcoord_, ntypes, datype_, dbox, cell_size, - fparam, aparam, atommap); + fparam, aparam, atommap, "", aparam_nall); run_model(dener, dforce_, dvirial, datom_energy_, datom_virial_, session, input_tensors, atommap, nframes); } else { int nloc = session_input_tensors_mixed_type( input_tensors, nframes, dcoord_, ntypes, datype_, dbox, cell_size, - fparam, aparam, atommap); + fparam, aparam, atommap, "", aparam_nall); run_model(dener, dforce_, dvirial, datom_energy_, datom_virial_, session, input_tensors, atommap, nframes); } @@ -1307,6 +1323,15 @@ void DeepPotModelDevi::init(const std::vector& models, if (daparam < 0) { daparam = 0; } + if (daparam > 0) { + try { + aparam_nall = get_scalar("fitting_attr/aparam_nall"); + } catch (deepmd::deepmd_exception) { + aparam_nall = false; + } + } else { + aparam_nall = false; + } model_type = get_scalar("model_attr/model_type"); // rcut = get_rcut(); // cell_size = rcut; @@ -1425,7 +1450,7 @@ void DeepPotModelDevi::compute(std::vector& all_energy, int nall = dcoord_.size() / 3; int nframes = 1; int nloc = nall - nghost; - validate_fparam_aparam(nloc, fparam, aparam_); + 
validate_fparam_aparam((aparam_nall ? nall : nloc), fparam, aparam_); std::vector> input_tensors; // select real atoms @@ -1434,7 +1459,7 @@ void DeepPotModelDevi::compute(std::vector& all_energy, int nghost_real, nall_real, nloc_real; select_real_atoms_coord(dcoord, datype, aparam, nghost_real, fwd_map, bkw_map, nall_real, nloc_real, dcoord_, datype_, aparam_, - nghost, ntypes, nframes, daparam, nall); + nghost, ntypes, nframes, daparam, nall, aparam_nall); // agp == 0 means that the LAMMPS nbor list has been updated if (ago == 0) { @@ -1450,11 +1475,11 @@ void DeepPotModelDevi::compute(std::vector& all_energy, if (dtype == tensorflow::DT_DOUBLE) { ret = session_input_tensors(input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, atommap, - nghost_real, ago); + nghost_real, ago, "", aparam_nall); } else { ret = session_input_tensors(input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, atommap, - nghost_real, ago); + nghost_real, ago, "", aparam_nall); } all_energy.resize(numb_models); all_force.resize(numb_models); @@ -1523,7 +1548,7 @@ void DeepPotModelDevi::compute( int nframes = 1; int nall = dcoord_.size() / 3; int nloc = nall - nghost; - validate_fparam_aparam(nloc, fparam, aparam_); + validate_fparam_aparam((aparam_nall ? nall : nloc), fparam, aparam_); std::vector> input_tensors; // select real atoms @@ -1532,7 +1557,7 @@ void DeepPotModelDevi::compute( int nghost_real, nall_real, nloc_real; select_real_atoms_coord(dcoord, datype, aparam, nghost_real, fwd_map, bkw_map, nall_real, nloc_real, dcoord_, datype_, aparam_, - nghost, ntypes, nframes, daparam, nall); + nghost, ntypes, nframes, daparam, nall, aparam_nall); // agp == 0 means that the LAMMPS nbor list has been updated if (ago == 0) { @@ -1548,11 +1573,11 @@ void DeepPotModelDevi::compute( if (dtype == tensorflow::DT_DOUBLE) { ret = session_input_tensors(input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, atommap, - nghost_real, ago); + nghost_real, ago, "", aparam_nall); } else { ret = session_input_tensors(input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam, atommap, - nghost_real, ago); + nghost_real, ago, "", aparam_nall); } all_energy.resize(numb_models); diff --git a/source/api_cc/src/common.cc b/source/api_cc/src/common.cc index 43412c4c43..0e2526414d 100644 --- a/source/api_cc/src/common.cc +++ b/source/api_cc/src/common.cc @@ -164,7 +164,8 @@ void deepmd::select_real_atoms_coord(std::vector& dcoord, const int& ntypes, const int& nframes, const int& daparam, - const int& nall) { + const int& nall, + const bool aparam_nall) { select_real_atoms(fwd_map, bkw_map, nghost_real, dcoord_, datype_, nghost, ntypes); // resize to nall_real @@ -177,9 +178,10 @@ void deepmd::select_real_atoms_coord(std::vector& dcoord, select_map(datype, datype_, fwd_map, 1); // aparam if (daparam > 0) { - aparam.resize(nframes * nloc_real); - select_map(aparam, aparam_, fwd_map, daparam, nframes, nloc_real, - nall - nghost); + aparam.resize(nframes * (aparam_nall ? nall_real : nloc_real)); + select_map(aparam, aparam_, fwd_map, daparam, nframes, + (aparam_nall ? nall_real : nloc_real), + (aparam_nall ? 
nall : (nall - nghost))); } } @@ -199,7 +201,8 @@ template void deepmd::select_real_atoms_coord( const int& ntypes, const int& nframes, const int& daparam, - const int& nall); + const int& nall, + const bool aparam_nall); template void deepmd::select_real_atoms_coord( std::vector& dcoord, @@ -217,7 +220,8 @@ template void deepmd::select_real_atoms_coord( const int& ntypes, const int& nframes, const int& daparam, - const int& nall); + const int& nall, + const bool aparam_nall); void deepmd::NeighborListData::copy_from_nlist(const InputNlist& inlist) { int inum = inlist.inum; @@ -374,7 +378,8 @@ int deepmd::session_input_tensors( const std::vector& fparam_, const std::vector& aparam__, const deepmd::AtomMap& atommap, - const std::string scope) { + const std::string scope, + const bool aparam_nall) { int nframes = dcoord_.size() / 3 / datype_.size(); int nall = datype_.size(); int nloc = nall; @@ -440,8 +445,10 @@ int deepmd::session_input_tensors( std::vector dcoord(dcoord_); atommap.forward(dcoord.begin(), dcoord_.begin(), 3, nframes, nall); std::vector aparam_(aparam__); - atommap.forward(aparam_.begin(), aparam__.begin(), - aparam__.size() / nframes / nloc, nframes, nloc); + atommap.forward( + aparam_.begin(), aparam__.begin(), + aparam__.size() / nframes / (aparam_nall ? nall : nloc), nframes, + (aparam_nall ? nall : nloc)); for (int ii = 0; ii < nframes; ++ii) { for (int jj = 0; jj < nall * 3; ++jj) { @@ -511,7 +518,8 @@ int deepmd::session_input_tensors( const deepmd::AtomMap& atommap, const int nghost, const int ago, - const std::string scope) { + const std::string scope, + const bool aparam_nall) { int nframes = dcoord_.size() / 3 / datype_.size(); int nall = datype_.size(); int nloc = nall - nghost; @@ -573,8 +581,10 @@ int deepmd::session_input_tensors( std::vector dcoord(dcoord_); atommap.forward(dcoord.begin(), dcoord_.begin(), 3, nframes, nall); std::vector aparam_(aparam__); - atommap.forward(aparam_.begin(), aparam__.begin(), - aparam__.size() / nframes / nloc, nframes, nloc); + atommap.forward( + aparam_.begin(), aparam__.begin(), + aparam__.size() / nframes / (aparam_nall ? nall : nloc), nframes, + (aparam_nall ? nall : nloc)); for (int ii = 0; ii < nframes; ++ii) { for (int jj = 0; jj < nall * 3; ++jj) { @@ -645,7 +655,8 @@ int deepmd::session_input_tensors_mixed_type( const std::vector& fparam_, const std::vector& aparam__, const deepmd::AtomMap& atommap, - const std::string scope) { + const std::string scope, + const bool aparam_nall) { int nall = datype_.size() / nframes; int nloc = nall; assert(nall * 3 * nframes == dcoord_.size()); @@ -706,8 +717,10 @@ int deepmd::session_input_tensors_mixed_type( std::vector dcoord(dcoord_); atommap.forward(dcoord.begin(), dcoord_.begin(), 3, nframes, nall); std::vector aparam_(aparam__); - atommap.forward(aparam_.begin(), aparam__.begin(), - aparam__.size() / nframes / nloc, nframes, nloc); + atommap.forward( + aparam_.begin(), aparam__.begin(), + aparam__.size() / nframes / (aparam_nall ? nall : nloc), nframes, + (aparam_nall ? 
nall : nloc)); for (int ii = 0; ii < nframes; ++ii) { for (int jj = 0; jj < nall * 3; ++jj) { @@ -919,6 +932,10 @@ template int deepmd::session_get_scalar(Session*, const std::string, const std::string); +template bool deepmd::session_get_scalar(Session*, + const std::string, + const std::string); + template void deepmd::session_get_vector(std::vector&, Session*, const std::string, @@ -1092,7 +1109,8 @@ template int deepmd::session_input_tensors( const std::vector& fparam_, const std::vector& aparam_, const deepmd::AtomMap& atommap, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors( std::vector>& input_tensors, const std::vector& dcoord_, @@ -1103,7 +1121,8 @@ template int deepmd::session_input_tensors( const std::vector& fparam_, const std::vector& aparam_, const deepmd::AtomMap& atommap, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors( std::vector>& input_tensors, @@ -1115,7 +1134,8 @@ template int deepmd::session_input_tensors( const std::vector& fparam_, const std::vector& aparam_, const deepmd::AtomMap& atommap, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors( std::vector>& input_tensors, const std::vector& dcoord_, @@ -1126,7 +1146,8 @@ template int deepmd::session_input_tensors( const std::vector& fparam_, const std::vector& aparam_, const deepmd::AtomMap& atommap, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors( std::vector>& input_tensors, @@ -1140,7 +1161,8 @@ template int deepmd::session_input_tensors( const deepmd::AtomMap& atommap, const int nghost, const int ago, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors( std::vector>& input_tensors, const std::vector& dcoord_, @@ -1153,7 +1175,8 @@ template int deepmd::session_input_tensors( const deepmd::AtomMap& atommap, const int nghost, const int ago, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors( std::vector>& input_tensors, @@ -1167,7 +1190,8 @@ template int deepmd::session_input_tensors( const deepmd::AtomMap& atommap, const int nghost, const int ago, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors( std::vector>& input_tensors, const std::vector& dcoord_, @@ -1180,7 +1204,8 @@ template int deepmd::session_input_tensors( const deepmd::AtomMap& atommap, const int nghost, const int ago, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors_mixed_type( std::vector>& input_tensors, @@ -1193,7 +1218,8 @@ template int deepmd::session_input_tensors_mixed_type( const std::vector& fparam_, const std::vector& aparam_, const deepmd::AtomMap& atommap, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors_mixed_type( std::vector>& input_tensors, const int& nframes, @@ -1205,7 +1231,8 @@ template int deepmd::session_input_tensors_mixed_type( const std::vector& fparam_, const std::vector& aparam_, const deepmd::AtomMap& atommap, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors_mixed_type( std::vector>& 
input_tensors, @@ -1218,7 +1245,8 @@ template int deepmd::session_input_tensors_mixed_type( const std::vector& fparam_, const std::vector& aparam_, const deepmd::AtomMap& atommap, - const std::string scope); + const std::string scope, + const bool aparam_nall); template int deepmd::session_input_tensors_mixed_type( std::vector>& input_tensors, const int& nframes, @@ -1230,7 +1258,8 @@ template int deepmd::session_input_tensors_mixed_type( const std::vector& fparam_, const std::vector& aparam_, const deepmd::AtomMap& atommap, - const std::string scope); + const std::string scope, + const bool aparam_nall); void deepmd::print_summary(const std::string& pre) { int num_intra_nthreads, num_inter_nthreads; diff --git a/source/lib/src/pairwise.cc b/source/lib/src/pairwise.cc index 428e92baa4..3fea27bd71 100644 --- a/source/lib/src/pairwise.cc +++ b/source/lib/src/pairwise.cc @@ -95,7 +95,7 @@ void deepmd::dprc_pairwise_map_cpu( // (3, 4, 8, 9, -1, 10, -1) forward_qmmm_map.resize((nfragments - 1) * map_size); std::fill(forward_qmmm_map.begin(), forward_qmmm_map.end(), -1); - int nqm_real; + int nqm_real = nloc; // init for nfragments = 1 for (int ii = 0; ii < nfragments - 1; ++ii) { // real for (int jj = 0, kk = 0; jj < nqm; ++jj) { diff --git a/source/op/pairwise.cc b/source/op/pairwise.cc index dfcfce6736..ee55c3dff3 100644 --- a/source/op/pairwise.cc +++ b/source/op/pairwise.cc @@ -78,7 +78,7 @@ class PairwiseIdxOp : public OpKernel { nghost_qmmm.push_back(nghost_qmmm_ii); nframes_qmmm.push_back(backward_qmmm_map.size() / nall); } - int max_nloc_qm = 0, max_nloc_qmmm = 0, max_nghost_qm = 0, + int max_nloc_qm = 1, max_nloc_qmmm = 1, max_nghost_qm = 0, max_nghost_qmmm = 0; for (int ii = 0; ii < nframes; ++ii) { max_nloc_qm = std::max(max_nloc_qm, nloc_qm[ii]); @@ -160,6 +160,10 @@ class PairwiseIdxOp : public OpKernel { } for (int jj = 0; jj < nall; ++jj) { m_backward_qm_map(ii, jj) = backward_qm_maps[ii][jj]; + // the ghost index should add the padding indexes + if (m_backward_qm_map(ii, jj) >= nloc_qm[ii]) { + m_backward_qm_map(ii, jj) += max_nloc_qm - nloc_qm[ii]; + } } for (int kk = 0; kk < nframes_qmmm[ii]; ++kk) { for (int jj = 0; jj < max_nloc_qmmm + max_nghost_qmmm; ++jj) { @@ -180,6 +184,10 @@ class PairwiseIdxOp : public OpKernel { for (int jj = 0; jj < nall; ++jj) { // max_nloc_qmmm + max_nghost_qmmm m_backward_qmmm_map(nn, jj) = backward_qmmm_maps[ii][kk * nall + jj]; + // the ghost index should add the padding indexes + if (m_backward_qmmm_map(nn, jj) >= nloc_qmmm[ii]) { + m_backward_qmmm_map(nn, jj) += max_nloc_qmmm - nloc_qmmm[ii]; + } } m_qmmm_frame_idx(nn) = ii; nn++; diff --git a/source/tests/infer/pairwise_dprc.pbtxt b/source/tests/infer/pairwise_dprc.pbtxt new file mode 100644 index 0000000000..1469bda72f --- /dev/null +++ b/source/tests/infer/pairwise_dprc.pbtxt @@ -0,0 +1,44536 @@ +node { + name: "train_attr/min_nbor_dist" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 0.9571980274118028 + } + } + } +} +node { + name: "train_attr/training_script" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: 
"{\"model\":{\"type\":\"pairwise_dprc\",\"type_map\":[\"C\",\"P\",\"O\",\"H\",\"OW\",\"HW\"],\"type_embedding\":{\"neuron\":[8],\"precision\":\"float32\",\"activation_function\":\"tanh\",\"resnet_dt\":false,\"trainable\":true,\"seed\":null},\"qm_model\":{\"descriptor\":{\"type\":\"se_atten_v2\",\"sel\":24,\"rcut_smth\":0.5,\"rcut\":9.0,\"attn_layer\":0,\"neuron\":[2,4,8],\"resnet_dt\":false,\"axis_neuron\":4,\"precision\":\"float32\",\"seed\":1,\"activation_function\":\"tanh\",\"type_one_side\":false,\"trainable\":true,\"exclude_types\":[],\"attn\":128,\"attn_dotr\":true,\"attn_mask\":false,\"set_davg_zero\":false},\"fitting_net\":{\"type\":\"ener\",\"neuron\":[2,4,8],\"resnet_dt\":true,\"precision\":\"float32\",\"atom_ener\":[null,null,null,null,0.0,0.0],\"seed\":1,\"numb_fparam\":0,\"numb_aparam\":0,\"activation_function\":\"tanh\",\"trainable\":true,\"rcond\":null,\"use_aparam_as_mask\":false},\"data_stat_nbatch\":10,\"data_stat_protect\":0.01,\"data_bias_nsample\":10,\"srtab_add_bias\":true,\"type\":\"standard\"},\"qmmm_model\":{\"descriptor\":{\"type\":\"se_atten_v2\",\"sel\":27,\"rcut_smth\":0.5,\"rcut\":6.0,\"attn_layer\":0,\"neuron\":[2,4,8],\"resnet_dt\":false,\"axis_neuron\":4,\"set_davg_zero\":true,\"exclude_types\":[[0,0],[0,1],[0,2],[0,3],[1,1],[1,2],[1,3],[2,2],[2,3],[3,3],[4,4],[4,5],[5,5]],\"precision\":\"float32\",\"seed\":1,\"activation_function\":\"tanh\",\"type_one_side\":false,\"trainable\":true,\"attn\":128,\"attn_dotr\":true,\"attn_mask\":false},\"fitting_net\":{\"type\":\"ener\",\"neuron\":[2,2,2],\"resnet_dt\":true,\"seed\":1,\"precision\":\"float32\",\"atom_ener\":[0.0,0.0,0.0,0.0,0.0,0.0],\"numb_fparam\":0,\"numb_aparam\":0,\"activation_function\":\"tanh\",\"trainable\":true,\"rcond\":null,\"use_aparam_as_mask\":false},\"data_stat_nbatch\":10,\"data_stat_protect\":0.01,\"data_bias_nsample\":10,\"srtab_add_bias\":true,\"type\":\"standard\"},\"data_stat_nbatch\":10,\"data_stat_protect\":0.01,\"data_bias_nsample\":10,\"srtab_add_bias\":true},\"learning_rate\":{\"type\":\"exp\",\"decay_steps\":5000,\"start_lr\":0.001,\"stop_lr\":3.51e-08,\"scale_by_worker\":\"linear\"},\"loss\":{\"type\":\"ener\",\"start_pref_e\":0.02,\"limit_pref_e\":1,\"start_pref_f\":1000,\"limit_pref_f\":1,\"start_pref_v\":0,\"limit_pref_v\":0,\"start_pref_ae\":0.0,\"limit_pref_ae\":0.0,\"start_pref_pf\":0.0,\"limit_pref_pf\":0.0,\"enable_atom_ener_coeff\":false,\"start_pref_gf\":0.0,\"limit_pref_gf\":0.0,\"numb_generalized_coord\":0},\"training\":{\"training_data\":{\"systems\":[\"../data\"],\"batch_size\":\"auto\",\"set_prefix\":\"set\",\"auto_prob\":\"prob_sys_size\",\"sys_probs\":null},\"numb_steps\":0,\"seed\":10,\"disp_file\":\"lcurve.out\",\"disp_freq\":100,\"save_freq\":1000,\"validation_data\":null,\"save_ckpt\":\"model.ckpt\",\"disp_training\":true,\"time_training\":true,\"profiling\":false,\"profiling_file\":\"timeline.json\",\"enable_profiler\":false,\"tensorboard\":false,\"tensorboard_log_dir\":\"log\",\"tensorboard_freq\":1}}" + } + } + } +} +node { + name: "model_type" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "original_model" + } + } + } +} +node { + name: "t_box" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "t_coord" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "shape" + 
value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "t_aparam" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "t_type" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "t_natoms" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "shape" + value { + shape { + dim { + size: 8 + } + } + } + } +} +node { + name: "t_mesh" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + } + } + } +} +node { + name: "model_attr/tmap" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "C P O H OW HW" + } + } + } +} +node { + name: "model_attr/model_type" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "ener" + } + } + } +} +node { + name: "model_attr/model_version" + op: "Const" + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "1.1" + } + } + } +} +node { + name: "fitting_attr/dfparam" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "fitting_attr/daparam" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "fitting_attr/aparam_nall" + op: "Const" + attr { + key: "dtype" + value { + type: DT_BOOL + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_BOOL + tensor_shape { + } + bool_val: true + } + } + } +} +node { + name: "descrpt_attr/ntypes" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 6 + } + } + } +} +node { + name: "descrpt_attr/rcut" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 9.0 + } + } + } +} +node { + name: "Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\t\000\000\000" + } + } + } +} +node { + name: "Reshape" + op: "Reshape" + input: "t_box" + input: "Reshape/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape" + op: "Shape" + input: "Reshape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: 
"value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice" + op: "StridedSlice" + input: "Shape" + input: "strided_slice/stack" + input: "strided_slice/stack_1" + input: "strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Cast" + op: "Cast" + input: "t_aparam" + attr { + key: "DstT" + value { + type: DT_INT32 + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_1" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_1/stack" + input: "strided_slice_1/stack_1" + input: "strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_1/shape" + op: "Pack" + input: "strided_slice" + input: "strided_slice_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_1" + op: "Reshape" + input: "Cast" + input: "Reshape_1/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "DprcPairwiseIdx" + op: "DprcPairwiseIdx" + input: "Reshape_1" + input: "t_natoms" +} +node { + name: "strided_slice_2/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: 
"strided_slice_2/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_2/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_2" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_2/stack" + input: "strided_slice_2/stack_1" + input: "strided_slice_2/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_2/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "Reshape_2/shape" + op: "Pack" + input: "strided_slice" + input: "strided_slice_2" + input: "Reshape_2/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_2" + op: "Reshape" + input: "t_coord" + input: "Reshape_2/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_3/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_3/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_3/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_3" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_3/stack" + input: "strided_slice_3/stack_1" + input: "strided_slice_3/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_3/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "Reshape_3/shape" + op: "Pack" + input: "strided_slice" + input: "strided_slice_3" + input: "Reshape_3/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: 
"axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_3" + op: "Reshape" + input: "t_type" + input: "Reshape_3/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_1" + op: "Shape" + input: "DprcPairwiseIdx:6" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_4/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_4/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_4/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_4" + op: "StridedSlice" + input: "Shape_1" + input: "strided_slice_4/stack" + input: "strided_slice_4/stack_1" + input: "strided_slice_4/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_2" + op: "Shape" + input: "Reshape_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_5/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_5/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_5/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_5" + op: "StridedSlice" + input: "Shape_2" + input: "strided_slice_5/stack" + input: "strided_slice_5/stack_1" + input: "strided_slice_5/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_3" + op: "Shape" + input: 
"Reshape_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_6/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_6/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_6/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_6" + op: "StridedSlice" + input: "Shape_3" + input: "strided_slice_6/stack" + input: "strided_slice_6/stack_1" + input: "strided_slice_6/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "concat/values_0/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat/values_0" + op: "Pack" + input: "strided_slice_5" + input: "concat/values_0/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat" + op: "ConcatV2" + input: "concat/values_0" + input: "strided_slice_6" + input: "concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Fill/value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } +} +node { + name: "Fill" + op: "Fill" + input: "concat" + input: "Fill/value" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_1" + op: "Cast" + input: "Fill" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "concat_1/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_1" + op: "ConcatV2" + input: "Cast_1" + input: "Reshape_2" + input: "concat_1/axis" + attr { + key: "N" + value { + i: 2 
+ } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "add/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "add" + op: "AddV2" + input: "DprcPairwiseIdx" + input: "add/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GatherV2/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "GatherV2" + op: "GatherV2" + input: "concat_1" + input: "add" + input: "GatherV2/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "batch_dims" + value { + i: 1 + } + } +} +node { + name: "Shape_4" + op: "Shape" + input: "Reshape_3" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_7/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_7/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_7/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_7" + op: "StridedSlice" + input: "Shape_4" + input: "strided_slice_7/stack" + input: "strided_slice_7/stack_1" + input: "strided_slice_7/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_5" + op: "Shape" + input: "Reshape_3" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_8/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_8/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_8/stack_2" + op: "Const" + attr { + key: "dtype" + value { + 
type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_8" + op: "StridedSlice" + input: "Shape_5" + input: "strided_slice_8/stack" + input: "strided_slice_8/stack_1" + input: "strided_slice_8/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "concat_2/values_0/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_2/values_0" + op: "Pack" + input: "strided_slice_7" + input: "concat_2/values_0/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "concat_2/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat_2" + op: "ConcatV2" + input: "concat_2/values_0" + input: "strided_slice_8" + input: "concat_2/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Fill_1/value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Fill_1" + op: "Fill" + input: "concat_2" + input: "Fill_1/value" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "concat_3/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_3" + op: "ConcatV2" + input: "Fill_1" + input: "Reshape_3" + input: "concat_3/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "add_1/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "add_1" + op: "AddV2" + input: "DprcPairwiseIdx" + input: "add_1/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GatherV2_1/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "GatherV2_1" + op: "GatherV2" + input: "concat_3" + input: "add_1" + input: "GatherV2_1/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_INT32 + } + } + attr { + key: "batch_dims" + value { + i: 1 + } + } +} +node { + 
name: "GatherV2_2/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "GatherV2_2" + op: "GatherV2" + input: "Reshape_2" + input: "DprcPairwiseIdx:6" + input: "GatherV2_2/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "Shape_6" + op: "Shape" + input: "GatherV2_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_9/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_9/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_9/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_9" + op: "StridedSlice" + input: "Shape_6" + input: "strided_slice_9/stack" + input: "strided_slice_9/stack_1" + input: "strided_slice_9/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_7" + op: "Shape" + input: "GatherV2_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_10/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_10/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_10/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_10" + op: "StridedSlice" + input: "Shape_7" + input: "strided_slice_10/stack" + input: "strided_slice_10/stack_1" + input: "strided_slice_10/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr 
{ + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "concat_4/values_0/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_4/values_0" + op: "Pack" + input: "strided_slice_9" + input: "concat_4/values_0/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "concat_4/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat_4" + op: "ConcatV2" + input: "concat_4/values_0" + input: "strided_slice_10" + input: "concat_4/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Fill_2/value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } +} +node { + name: "Fill_2" + op: "Fill" + input: "concat_4" + input: "Fill_2/value" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_2" + op: "Cast" + input: "Fill_2" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "concat_5/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_5" + op: "ConcatV2" + input: "Cast_2" + input: "GatherV2_2" + input: "concat_5/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "add_2/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "add_2" + op: "AddV2" + input: "DprcPairwiseIdx:2" + input: "add_2/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GatherV2_3/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "GatherV2_3" + op: "GatherV2" + input: "concat_5" + input: "add_2" + input: "GatherV2_3/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "batch_dims" + value { + i: 1 + } + } +} +node { + name: "GatherV2_4/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node 
{ + name: "GatherV2_4" + op: "GatherV2" + input: "Reshape_3" + input: "DprcPairwiseIdx:6" + input: "GatherV2_4/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_INT32 + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "Shape_8" + op: "Shape" + input: "GatherV2_4" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_11/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_11/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_11/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_11" + op: "StridedSlice" + input: "Shape_8" + input: "strided_slice_11/stack" + input: "strided_slice_11/stack_1" + input: "strided_slice_11/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_9" + op: "Shape" + input: "GatherV2_4" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_12/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_12/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_12/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_12" + op: "StridedSlice" + input: "Shape_9" + input: "strided_slice_12/stack" + input: "strided_slice_12/stack_1" + input: "strided_slice_12/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + 
} + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "concat_6/values_0/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_6/values_0" + op: "Pack" + input: "strided_slice_11" + input: "concat_6/values_0/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "concat_6/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat_6" + op: "ConcatV2" + input: "concat_6/values_0" + input: "strided_slice_12" + input: "concat_6/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Fill_3/value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Fill_3" + op: "Fill" + input: "concat_6" + input: "Fill_3/value" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "concat_7/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_7" + op: "ConcatV2" + input: "Fill_3" + input: "GatherV2_4" + input: "concat_7/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "add_3/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "add_3" + op: "AddV2" + input: "DprcPairwiseIdx:2" + input: "add_3/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GatherV2_5/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "GatherV2_5" + op: "GatherV2" + input: "concat_7" + input: "add_3" + input: "GatherV2_5/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_INT32 + } + } + attr { + key: "batch_dims" + value { + i: 1 + } + } +} +node { + name: "GatherV2_6/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "GatherV2_6" + op: "GatherV2" + input: "Reshape" + input: "DprcPairwiseIdx:6" + input: "GatherV2_6/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "Const" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } 
+ } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 6 + } + } + tensor_content: "\000\000\000\000\001\000\000\000\002\000\000\000\003\000\000\000\004\000\000\000\005\000\000\000" + } + } + } +} +node { + name: "one_hot/on_value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 1.0 + } + } + } +} +node { + name: "one_hot/off_value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } +} +node { + name: "one_hot/depth" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 6 + } + } + } +} +node { + name: "one_hot" + op: "OneHot" + input: "Const" + input: "one_hot/depth" + input: "one_hot/on_value" + input: "one_hot/off_value" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "TI" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: -1 + } + } +} +node { + name: "Reshape_4/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\006\000\000\000" + } + } + } +} +node { + name: "Reshape_4" + op: "Reshape" + input: "one_hot" + input: "Reshape_4/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "type_embed_net/matrix_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 6 + } + dim { + size: 8 + } + } + tensor_content: "\354T\211>\035\322\362>P\373h\276+\345\265\274r\357\330>\367\240\230\276\2658\364<\325\216\013>U\000\330=Qk_JU\016\276\225\341\\=\305\r\213\276\"\002\177<\241\341\310>\027\205$>>>v\276\333.\031\276\377\206\356\276\322\r\330\275{Ph>\030Y\254\276D\032\367\275S\305\312\276\033\230,>)\217P\276\320k\371=\330\353\363\276\247[e\276o\224\246=\322\240>\274qa\251>\340\333?=M\266B>\024\345\361\276\356\231\216\275\341[\'>=\263\245\275w\322\275>\370A\r\277j1\225>\223\205\262\276" + } + } + } +} +node { + name: "type_embed_net/matrix_1/read" + op: "Identity" + input: "type_embed_net/matrix_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@type_embed_net/matrix_1" + } + } + } +} +node { + name: "type_embed_net/bias_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 8 + } + } + tensor_content: "\213\262\020?;I\021\277\207Pb?:\334W>\025\300\005\300\310!\354>E\013\355?\246\232\342?" 
+ } + } + } +} +node { + name: "type_embed_net/bias_1/read" + op: "Identity" + input: "type_embed_net/bias_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@type_embed_net/bias_1" + } + } + } +} +node { + name: "type_embed_net/MatMul" + op: "MatMul" + input: "Reshape_4" + input: "type_embed_net/matrix_1/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "type_embed_net/BiasAdd" + op: "BiasAdd" + input: "type_embed_net/MatMul" + input: "type_embed_net/bias_1/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "type_embed_net/Tanh" + op: "Tanh" + input: "type_embed_net/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "type_embed_net/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\010\000\000\000" + } + } + } +} +node { + name: "type_embed_net/Reshape" + op: "Reshape" + input: "type_embed_net/Tanh" + input: "type_embed_net/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_5/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\010\000\000\000" + } + } + } +} +node { + name: "Reshape_5" + op: "Reshape" + input: "type_embed_net/Reshape" + input: "Reshape_5/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "zeros" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 1 + } + dim { + size: 8 + } + } + float_val: 0.0 + } + } + } +} +node { + name: "concat_8/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat_8" + op: "ConcatV2" + input: "Reshape_5" + input: "zeros" + input: "concat_8/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "t_typeebd" + op: "Identity" + input: "concat_8" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "strided_slice_13/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_13/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_13/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + 
size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_13" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_13/stack" + input: "strided_slice_13/stack_1" + input: "strided_slice_13/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul" + op: "Mul" + input: "strided_slice_13" + input: "mul/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_6/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_6/shape" + op: "Pack" + input: "Reshape_6/shape/0" + input: "mul" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_6" + op: "Reshape" + input: "GatherV2" + input: "Reshape_6/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_14/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_14/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_14/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_14" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_14/stack" + input: "strided_slice_14/stack_1" + input: "strided_slice_14/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_7/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_7/shape" + op: "Pack" + input: "Reshape_7/shape/0" + input: "strided_slice_14" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_7" + op: "Reshape" + input: "GatherV2_1" + input: 
"Reshape_7/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_10" + op: "Shape" + input: "Reshape_6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_15/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_15/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_15/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_15" + op: "StridedSlice" + input: "Shape_10" + input: "strided_slice_15/stack" + input: "strided_slice_15/stack_1" + input: "strided_slice_15/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "descrpt_attr_qm/sel" + op: "Const" + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 24 + } + } + } +} +node { + name: "descrpt_attr_qm/original_sel" + op: "Const" + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 24 + } + } + } +} +node { + name: "descrpt_attr_qm/t_avg" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 6 + } + dim { + size: 96 + } + } + tensor_content: 
"\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\r*\264e\205\336\327?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\0
00\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000V\322\345F;\274\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\0
00\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\277\211\260/\206/\326?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\
333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000(Z]\014\212\333\325?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\230K\"9\363\016\323?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\00
0\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\t\004\343\307\226_\322?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "descrpt_attr_qm/t_avg/read" + op: "Identity" + input: "descrpt_attr_qm/t_avg" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@descrpt_attr_qm/t_avg" + } + } + } +} +node { + name: "descrpt_attr_qm/t_std" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 6 + } + dim { + size: 96 + } + } + tensor_content: 
"2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?2\253`h.D\311?\306\262a\322\227.\317?\306\262a\322\227.\317?\306\262a\322\227.\317?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\32
1\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\020R\020\347Vn\300?\361\366$\240\350\321\312?\361\366$\240\350\321\312?\361\366$\240\350\321\312?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?y\346\232h\306\345\277?\\\250\007\271\3318\313?\\\250\007\271\3318\313?\\\250\007\271\3318\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232
\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?HX\315\326\374b\303?`\372\204\226\235\232\313?`\372\204\226\235\232\313?`\372\204\226\235\232\313?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\37
7\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\327\213]P\320\014\316?xs\325\377\356\005\314?xs\325\377\356\005\314?xs\325\377\356\005\314?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?\252E\005Z\027\021\311?M\021hN\213\256\311?M\021hN\213\256\311?M\021hN\213\256\311?" 
+ } + } + } +} +node { + name: "descrpt_attr_qm/t_std/read" + op: "Identity" + input: "descrpt_attr_qm/t_std" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@descrpt_attr_qm/t_std" + } + } + } +} +node { + name: "strided_slice_16/stack" + op: "Const" + input: "^descrpt_attr_qm/original_sel" + input: "^descrpt_attr_qm/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_16/stack_1" + op: "Const" + input: "^descrpt_attr_qm/original_sel" + input: "^descrpt_attr_qm/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_16/stack_2" + op: "Const" + input: "^descrpt_attr_qm/original_sel" + input: "^descrpt_attr_qm/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_16" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_16/stack" + input: "strided_slice_16/stack_1" + input: "strided_slice_16/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_1/y" + op: "Const" + input: "^descrpt_attr_qm/original_sel" + input: "^descrpt_attr_qm/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_1" + op: "Mul" + input: "strided_slice_16" + input: "mul_1/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_8/shape/0" + op: "Const" + input: "^descrpt_attr_qm/original_sel" + input: "^descrpt_attr_qm/sel" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_8/shape" + op: "Pack" + input: "Reshape_8/shape/0" + input: "mul_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_8" + op: "Reshape" + input: "Reshape_6" + input: "Reshape_8/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_9/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\t\000\000\000" + } + } + } +} +node { + name: "Reshape_9" + op: "Reshape" + input: "Reshape" + input: "Reshape_9/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_17/stack" + op: "Const" + attr { + key: 
"dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_17/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_17/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_17" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_17/stack" + input: "strided_slice_17/stack_1" + input: "strided_slice_17/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_10/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_10/shape" + op: "Pack" + input: "Reshape_10/shape/0" + input: "strided_slice_17" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_10" + op: "Reshape" + input: "Reshape_7" + input: "Reshape_10/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "ProdEnvMatAMix/mesh" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "ProdEnvMatAMix" + op: "ProdEnvMatAMix" + input: "Reshape_8" + input: "Reshape_10" + input: "DprcPairwiseIdx:4" + input: "Reshape_9" + input: "ProdEnvMatAMix/mesh" + input: "descrpt_attr_qm/t_avg/read" + input: "descrpt_attr_qm/t_std/read" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "rcut_a" + value { + f: -1.0 + } + } + attr { + key: "rcut_r" + value { + f: 9.0 + } + } + attr { + key: "rcut_r_smth" + value { + f: 0.5 + } + } + attr { + key: "sel_a" + value { + list { + i: 24 + } + } + } + attr { + key: "sel_r" + value { + list { + i: 0 + } + } + } +} +node { + name: "Reshape_11/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_11" + op: "Reshape" + input: "ProdEnvMatAMix:4" + input: "Reshape_11/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_13/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377`\000\000\000" + } + } + } +} +node { + 
name: "Reshape_13" + op: "Reshape" + input: "ProdEnvMatAMix" + input: "Reshape_13/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "clip_by_value/Minimum/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 5 + } + } + } +} +node { + name: "clip_by_value/Minimum" + op: "Minimum" + input: "Reshape_10" + input: "clip_by_value/Minimum/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "clip_by_value/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "clip_by_value" + op: "Maximum" + input: "clip_by_value/Minimum" + input: "clip_by_value/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_18/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_18/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_18/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_18" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_18/stack" + input: "strided_slice_18/stack_1" + input: "strided_slice_18/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Slice/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice/size" + op: "Pack" + input: "Slice/size/0" + input: "strided_slice_18" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice" + op: "Slice" + input: "clip_by_value" + input: "Slice/begin" + input: "Slice/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_14/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: 
"Reshape_14" + op: "Reshape" + input: "Slice" + input: "Reshape_14/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "o_rmat_qm" + op: "Identity" + input: "Reshape_13" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_rmat_deriv_qm" + op: "Identity" + input: "ProdEnvMatAMix:1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_rij_qm" + op: "Identity" + input: "ProdEnvMatAMix:2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_nlist_qm" + op: "Identity" + input: "ProdEnvMatAMix:3" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_15/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\006\000\000\000\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "Reshape_15" + op: "Reshape" + input: "descrpt_attr_qm/t_avg/read" + input: "Reshape_15/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Slice_1/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_1/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377\001\000\000\000\001\000\000\000" + } + } + } +} +node { + name: "Slice_1" + op: "Slice" + input: "Reshape_15" + input: "Slice_1/begin" + input: "Slice_1/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Reshape_16/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\006\000\000\000\001\000\000\000" + } + } + } +} +node { + name: "Reshape_16" + op: "Reshape" + input: "Slice_1" + input: "Reshape_16/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_17/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\006\000\000\000\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "Reshape_17" + op: "Reshape" + input: "descrpt_attr_qm/t_std/read" + input: "Reshape_17/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Slice_2/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_2/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + 
tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377\001\000\000\000\001\000\000\000" + } + } + } +} +node { + name: "Slice_2" + op: "Slice" + input: "Reshape_17" + input: "Slice_2/begin" + input: "Slice_2/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Reshape_18/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\006\000\000\000\001\000\000\000" + } + } + } +} +node { + name: "Reshape_18" + op: "Reshape" + input: "Slice_2" + input: "Reshape_18/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "embedding_lookup/axis" + op: "Const" + attr { + key: "_class" + value { + list { + s: "loc:@Reshape_16" + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "embedding_lookup" + op: "GatherV2" + input: "Reshape_16" + input: "Reshape_14" + input: "embedding_lookup/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@Reshape_16" + } + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "embedding_lookup/Identity" + op: "Identity" + input: "embedding_lookup" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "strided_slice_19/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_19/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_19/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_19" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_19/stack" + input: "strided_slice_19/stack_1" + input: "strided_slice_19/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_19/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_19/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: 
"Reshape_19/shape" + op: "Pack" + input: "Reshape_19/shape/0" + input: "strided_slice_19" + input: "Reshape_19/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_19" + op: "Reshape" + input: "embedding_lookup/Identity" + input: "Reshape_19/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "embedding_lookup_1/axis" + op: "Const" + attr { + key: "_class" + value { + list { + s: "loc:@Reshape_18" + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "embedding_lookup_1" + op: "GatherV2" + input: "Reshape_18" + input: "Reshape_14" + input: "embedding_lookup_1/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@Reshape_18" + } + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "embedding_lookup_1/Identity" + op: "Identity" + input: "embedding_lookup_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "strided_slice_20/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_20/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_20/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_20" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_20/stack" + input: "strided_slice_20/stack_1" + input: "strided_slice_20/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_20/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_20/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "Reshape_20/shape" + op: "Pack" + input: "Reshape_20/shape/0" + input: "strided_slice_20" + input: "Reshape_20/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_20" + op: "Reshape" + input: 
"embedding_lookup_1/Identity" + input: "Reshape_20/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_21/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "Reshape_21" + op: "Reshape" + input: "ProdEnvMatAMix" + input: "Reshape_21/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Slice_3/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_3/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "Slice_3" + op: "Slice" + input: "Reshape_21" + input: "Slice_3/begin" + input: "Slice_3/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "strided_slice_21/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_21/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_21/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_21" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_21/stack" + input: "strided_slice_21/stack_1" + input: "strided_slice_21/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_22/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_22/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 24 + } + } + } +} +node { + name: "Reshape_22/shape" + op: "Pack" + input: "Reshape_22/shape/0" + input: "strided_slice_21" + input: "Reshape_22/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: 
"Reshape_22" + op: "Reshape" + input: "Slice_3" + input: "Reshape_22/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "mul_3" + op: "Mul" + input: "Reshape_22" + input: "Reshape_20" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "add_4" + op: "AddV2" + input: "mul_3" + input: "Reshape_19" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "mul_4/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 0.5 + } + } + } +} +node { + name: "mul_4" + op: "Mul" + input: "mul_4/x" + input: "add_4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "sub_1/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 1.0 + } + } + } +} +node { + name: "sub_1" + op: "Sub" + input: "sub_1/x" + input: "mul_4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Neg" + op: "Neg" + input: "sub_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "mul_5" + op: "Mul" + input: "Neg" + input: "sub_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "mul_6" + op: "Mul" + input: "mul_5" + input: "sub_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "add_5/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 1.0 + } + } + } +} +node { + name: "add_5" + op: "AddV2" + input: "mul_6" + input: "add_5/y" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "clip_by_value_1/Minimum/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 1.0 + } + } + } +} +node { + name: "clip_by_value_1/Minimum" + op: "Minimum" + input: "add_5" + input: "clip_by_value_1/Minimum/y" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "clip_by_value_1/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 0.0 + } + } + } +} +node { + name: "clip_by_value_1" + op: "Maximum" + input: "clip_by_value_1/Minimum" + input: "clip_by_value_1/y" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Cast_4" + op: "Cast" + input: "clip_by_value_1" + attr { + key: "DstT" + value { + type: DT_FLOAT + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "strided_slice_22/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_22/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_22/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr 
{ + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_22" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_22/stack" + input: "strided_slice_22/stack_1" + input: "strided_slice_22/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_23/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_23/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 96 + } + } + } +} +node { + name: "Reshape_23/shape" + op: "Pack" + input: "Reshape_23/shape/0" + input: "strided_slice_22" + input: "Reshape_23/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_23" + op: "Reshape" + input: "o_rmat_qm" + input: "Reshape_23/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_24/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377`\000\000\000" + } + } + } +} +node { + name: "Reshape_24" + op: "Reshape" + input: "Reshape_23" + input: "Reshape_24/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_5" + op: "Cast" + input: "Reshape_24" + attr { + key: "DstT" + value { + type: DT_FLOAT + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Slice_4/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_4/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377`\000\000\000" + } + } + } +} +node { + name: "Slice_4" + op: "Slice" + input: "Cast_5" + input: "Slice_4/begin" + input: "Slice_4/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Shape_12" + op: "Shape" + input: "Slice_4" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_25/stack" + op: "Const" + attr { + key: "dtype" + 
value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_25/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_25/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_25" + op: "StridedSlice" + input: "Shape_12" + input: "strided_slice_25/stack" + input: "strided_slice_25/stack_1" + input: "strided_slice_25/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_26/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "Reshape_26" + op: "Reshape" + input: "Slice_4" + input: "Reshape_26/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Slice_5/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_5/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "Slice_5" + op: "Slice" + input: "Reshape_26" + input: "Slice_5/begin" + input: "Slice_5/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Reshape_27/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "Reshape_27" + op: "Reshape" + input: "Slice_5" + input: "Reshape_27/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/matrix_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 1 + } + dim { + size: 2 + } + } + tensor_content: "\025,/>\337\222\224\274" + } + } + } +} +node { + name: "filter_type_all_qm/matrix_1/read" + op: "Identity" + input: "filter_type_all_qm/matrix_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: 
"loc:@filter_type_all_qm/matrix_1" + } + } + } +} +node { + name: "filter_type_all_qm/bias_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "k\313\226\277\270\2765\277" + } + } + } +} +node { + name: "filter_type_all_qm/bias_1/read" + op: "Identity" + input: "filter_type_all_qm/bias_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/bias_1" + } + } + } +} +node { + name: "filter_type_all_qm/MatMul" + op: "MatMul" + input: "Reshape_27" + input: "filter_type_all_qm/matrix_1/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all_qm/BiasAdd" + op: "BiasAdd" + input: "filter_type_all_qm/MatMul" + input: "filter_type_all_qm/bias_1/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all_qm/Tanh" + op: "Tanh" + input: "filter_type_all_qm/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape" + op: "Reshape" + input: "filter_type_all_qm/Tanh" + input: "filter_type_all_qm/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all_qm/concat" + op: "ConcatV2" + input: "Reshape_27" + input: "Reshape_27" + input: "filter_type_all_qm/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/add" + op: "AddV2" + input: "filter_type_all_qm/concat" + input: "filter_type_all_qm/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/matrix_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + dim { + size: 4 + } + } + tensor_content: "\364\000\235\274\271\244\373\276C\345\363>\317\257\364>\336\315\005\276\263&\237\275\305\332\024\277p7\310>" + } + } + } +} +node { + name: "filter_type_all_qm/matrix_2/read" + op: "Identity" + input: "filter_type_all_qm/matrix_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/matrix_2" + } + } + } +} +node { + name: "filter_type_all_qm/bias_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + } + tensor_content: "J\223\376\276n\333\375>\306A\010?\347\234\223\276" + } + } + } +} +node { + 
name: "filter_type_all_qm/bias_2/read" + op: "Identity" + input: "filter_type_all_qm/bias_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/bias_2" + } + } + } +} +node { + name: "filter_type_all_qm/MatMul_1" + op: "MatMul" + input: "filter_type_all_qm/add" + input: "filter_type_all_qm/matrix_2/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all_qm/BiasAdd_1" + op: "BiasAdd" + input: "filter_type_all_qm/MatMul_1" + input: "filter_type_all_qm/bias_2/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all_qm/Tanh_1" + op: "Tanh" + input: "filter_type_all_qm/BiasAdd_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/Reshape_1/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_1" + op: "Reshape" + input: "filter_type_all_qm/Tanh_1" + input: "filter_type_all_qm/Reshape_1/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/concat_1/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all_qm/concat_1" + op: "ConcatV2" + input: "filter_type_all_qm/add" + input: "filter_type_all_qm/add" + input: "filter_type_all_qm/concat_1/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/add_1" + op: "AddV2" + input: "filter_type_all_qm/concat_1" + input: "filter_type_all_qm/Reshape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/matrix_3" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + dim { + size: 8 + } + } + tensor_content: "\037\356\361>k\024\224>\322\335\271>\220\016\005>\306\231\244>\014\352\257\276\340G\335\275\225\242\202\276rC\"\274\204a/\276\010P\260\275F<\023\277D\025\365=`\217\302>3\000+\276\251\307\005?C\235\030\276\376v\365=\330\226\331\276\261\003\230>\206\356H>\324\306\340\274\361L\224\276W2B?&\214K\276v\251A\276\350a\213>\006\202\177>o\222U\276$m\230\276\313\335\300>h\255\243=" + } + } + } +} +node { + name: "filter_type_all_qm/matrix_3/read" + op: "Identity" + input: "filter_type_all_qm/matrix_3" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/matrix_3" + } + } + } +} +node { + name: "filter_type_all_qm/bias_3" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 8 + } + } + tensor_content: "g\264\263?\272E\330\277\244iX\276`8\223?\302\227\301?Q]\333>\000\341,\300m\030\204=" + } 
+ } + } +} +node { + name: "filter_type_all_qm/bias_3/read" + op: "Identity" + input: "filter_type_all_qm/bias_3" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/bias_3" + } + } + } +} +node { + name: "filter_type_all_qm/MatMul_2" + op: "MatMul" + input: "filter_type_all_qm/add_1" + input: "filter_type_all_qm/matrix_3/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all_qm/BiasAdd_2" + op: "BiasAdd" + input: "filter_type_all_qm/MatMul_2" + input: "filter_type_all_qm/bias_3/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all_qm/Tanh_2" + op: "Tanh" + input: "filter_type_all_qm/BiasAdd_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/Reshape_2/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\010\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_2" + op: "Reshape" + input: "filter_type_all_qm/Tanh_2" + input: "filter_type_all_qm/Reshape_2/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/concat_2/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all_qm/concat_2" + op: "ConcatV2" + input: "filter_type_all_qm/add_1" + input: "filter_type_all_qm/add_1" + input: "filter_type_all_qm/concat_2/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/add_2" + op: "AddV2" + input: "filter_type_all_qm/concat_2" + input: "filter_type_all_qm/Reshape_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/Reshape_3/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_3" + op: "Reshape" + input: "Reshape_14" + input: "filter_type_all_qm/Reshape_3/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/mul/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 7 + } + } + } +} +node { + name: "filter_type_all_qm/mul" + op: "Mul" + input: "filter_type_all_qm/Reshape_3" + input: "filter_type_all_qm/mul/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/Tile/multiples" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: 
"\001\000\000\000\030\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Tile" + op: "Tile" + input: "filter_type_all_qm/mul" + input: "filter_type_all_qm/Tile/multiples" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tmultiples" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/Reshape_4/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\030\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_4" + op: "Reshape" + input: "Reshape_11" + input: "filter_type_all_qm/Reshape_4/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/add_3" + op: "AddV2" + input: "filter_type_all_qm/Tile" + input: "filter_type_all_qm/Reshape_4" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/Reshape_5/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_5" + op: "Reshape" + input: "filter_type_all_qm/add_3" + input: "filter_type_all_qm/Reshape_5/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/Reshape_6/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\001\000\000\000\007\000\000\000\377\377\377\377" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_6" + op: "Reshape" + input: "t_typeebd" + input: "filter_type_all_qm/Reshape_6/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/Tile_1/multiples" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\007\000\000\000\001\000\000\000\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Tile_1" + op: "Tile" + input: "filter_type_all_qm/Reshape_6" + input: "filter_type_all_qm/Tile_1/multiples" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tmultiples" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/Reshape_7/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\007\000\000\000\001\000\000\000\377\377\377\377" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_7" + op: "Reshape" + input: "t_typeebd" + input: "filter_type_all_qm/Reshape_7/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/Tile_2/multiples" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: 
"\001\000\000\000\007\000\000\000\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Tile_2" + op: "Tile" + input: "filter_type_all_qm/Reshape_7" + input: "filter_type_all_qm/Tile_2/multiples" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tmultiples" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/concat_3/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "filter_type_all_qm/concat_3" + op: "ConcatV2" + input: "filter_type_all_qm/Tile_1" + input: "filter_type_all_qm/Tile_2" + input: "filter_type_all_qm/concat_3/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/Reshape_8/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\020\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_8" + op: "Reshape" + input: "filter_type_all_qm/concat_3" + input: "filter_type_all_qm/Reshape_8/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/matrix_1_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 16 + } + dim { + size: 2 + } + } + tensor_content: "\n\007\217=\257\236\362\273IGS\274\rS\021<,\305\032?\260\300\034\276J7%\276\352\343\322=\303\311\263>\302\243g\275+a@<\000\207\311=\365\234\337<@\356S>C\221\206=\343D\277\276\217\003\212>1e\200>\251l\210>\210\337\237>\361\327\376>\230lP\276\023D\336\276\361@\345=U\003o\276\'\017\037\276\326\303\000\276\002\374F>\255\256\211>\364D\014?\346\232%<\375\251\204;" + } + } + } +} +node { + name: "filter_type_all_qm/matrix_1_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qm/matrix_1_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/matrix_1_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qm/bias_1_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "k\313\226\277\270\2765\277" + } + } + } +} +node { + name: "filter_type_all_qm/bias_1_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qm/bias_1_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/bias_1_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qm/MatMul_3" + op: "MatMul" + input: "filter_type_all_qm/Reshape_8" + input: "filter_type_all_qm/matrix_1_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all_qm/BiasAdd_3" + op: "BiasAdd" + input: "filter_type_all_qm/MatMul_3" + input: "filter_type_all_qm/bias_1_two_side_ebd/read" + attr { + key: "T" + value { + type: 
DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all_qm/Tanh_3" + op: "Tanh" + input: "filter_type_all_qm/BiasAdd_3" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/Reshape_9/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_9" + op: "Reshape" + input: "filter_type_all_qm/Tanh_3" + input: "filter_type_all_qm/Reshape_9/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/matrix_2_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + dim { + size: 4 + } + } + tensor_content: "\364\000\235\274\271\244\373\276C\345\363>\317\257\364>\336\315\005\276\263&\237\275\305\332\024\277p7\310>" + } + } + } +} +node { + name: "filter_type_all_qm/matrix_2_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qm/matrix_2_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/matrix_2_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qm/bias_2_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + } + tensor_content: "J\223\376\276n\333\375>\306A\010?\347\234\223\276" + } + } + } +} +node { + name: "filter_type_all_qm/bias_2_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qm/bias_2_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/bias_2_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qm/MatMul_4" + op: "MatMul" + input: "filter_type_all_qm/Reshape_9" + input: "filter_type_all_qm/matrix_2_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all_qm/BiasAdd_4" + op: "BiasAdd" + input: "filter_type_all_qm/MatMul_4" + input: "filter_type_all_qm/bias_2_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all_qm/Tanh_4" + op: "Tanh" + input: "filter_type_all_qm/BiasAdd_4" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/Reshape_10/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_10" + op: "Reshape" + input: "filter_type_all_qm/Tanh_4" + input: "filter_type_all_qm/Reshape_10/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/concat_4/axis" + op: "Const" + attr { + key: "dtype" + value { + type: 
DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all_qm/concat_4" + op: "ConcatV2" + input: "filter_type_all_qm/Reshape_9" + input: "filter_type_all_qm/Reshape_9" + input: "filter_type_all_qm/concat_4/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/add_4" + op: "AddV2" + input: "filter_type_all_qm/concat_4" + input: "filter_type_all_qm/Reshape_10" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/matrix_3_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + dim { + size: 8 + } + } + tensor_content: "\037\356\361>k\024\224>\322\335\271>\220\016\005>\306\231\244>\014\352\257\276\340G\335\275\225\242\202\276rC\"\274\204a/\276\010P\260\275F<\023\277D\025\365=`\217\302>3\000+\276\251\307\005?C\235\030\276\376v\365=\330\226\331\276\261\003\230>\206\356H>\324\306\340\274\361L\224\276W2B?&\214K\276v\251A\276\350a\213>\006\202\177>o\222U\276$m\230\276\313\335\300>h\255\243=" + } + } + } +} +node { + name: "filter_type_all_qm/matrix_3_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qm/matrix_3_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/matrix_3_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qm/bias_3_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 8 + } + } + tensor_content: "g\264\263?\272E\330\277\244iX\276`8\223?\302\227\301?Q]\333>\000\341,\300m\030\204=" + } + } + } +} +node { + name: "filter_type_all_qm/bias_3_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qm/bias_3_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/bias_3_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qm/MatMul_5" + op: "MatMul" + input: "filter_type_all_qm/add_4" + input: "filter_type_all_qm/matrix_3_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all_qm/BiasAdd_5" + op: "BiasAdd" + input: "filter_type_all_qm/MatMul_5" + input: "filter_type_all_qm/bias_3_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all_qm/Tanh_5" + op: "Tanh" + input: "filter_type_all_qm/BiasAdd_5" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/Reshape_11/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\010\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_11" + op: "Reshape" + input: "filter_type_all_qm/Tanh_5" + input: "filter_type_all_qm/Reshape_11/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + 
attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/concat_5/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all_qm/concat_5" + op: "ConcatV2" + input: "filter_type_all_qm/add_4" + input: "filter_type_all_qm/add_4" + input: "filter_type_all_qm/concat_5/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/add_5" + op: "AddV2" + input: "filter_type_all_qm/concat_5" + input: "filter_type_all_qm/Reshape_11" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/embedding_lookup/axis" + op: "Const" + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/add_5" + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "filter_type_all_qm/embedding_lookup" + op: "GatherV2" + input: "filter_type_all_qm/add_5" + input: "filter_type_all_qm/Reshape_5" + input: "filter_type_all_qm/embedding_lookup/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qm/add_5" + } + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "filter_type_all_qm/embedding_lookup/Identity" + op: "Identity" + input: "filter_type_all_qm/embedding_lookup" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/Reshape_12/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qm/Reshape_12" + op: "Reshape" + input: "Cast_4" + input: "filter_type_all_qm/Reshape_12/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qm/mul_1" + op: "Mul" + input: "filter_type_all_qm/embedding_lookup/Identity" + input: "filter_type_all_qm/Reshape_12" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/mul_2" + op: "Mul" + input: "filter_type_all_qm/add_2" + input: "filter_type_all_qm/mul_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qm/add_6" + op: "AddV2" + input: "filter_type_all_qm/mul_2" + input: "filter_type_all_qm/add_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Reshape_29/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377\030\000\000\000\010\000\000\000" + } + } + } +} +node { + name: "Reshape_29" + op: "Reshape" + input: "filter_type_all_qm/add_6" + input: "Reshape_29/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + 
name: "Reshape_30/shape/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 24 + } + } + } +} +node { + name: "Reshape_30/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 4 + } + } + } +} +node { + name: "Reshape_30/shape" + op: "Pack" + input: "strided_slice_25" + input: "Reshape_30/shape/1" + input: "Reshape_30/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_30" + op: "Reshape" + input: "Slice_4" + input: "Reshape_30/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "MatMul" + op: "BatchMatMulV2" + input: "Reshape_30" + input: "Reshape_29" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } +} +node { + name: "truediv/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 24.0 + } + } + } +} +node { + name: "truediv" + op: "RealDiv" + input: "MatMul" + input: "truediv/y" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Slice_7/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_7/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "Slice_7" + op: "Slice" + input: "truediv" + input: "Slice_7/begin" + input: "Slice_7/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "MatMul_1" + op: "BatchMatMulV2" + input: "truediv" + input: "Slice_7" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } +} +node { + name: "Reshape_31/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377 \000\000\000" + } + } + } +} +node { + name: "Reshape_31" + op: "Reshape" + input: "MatMul_1" + input: "Reshape_31/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_7" + op: "Cast" + input: "Reshape_31" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Shape_13" + op: "Shape" + input: "Reshape_23" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: 
TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_26/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_26/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_26/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_26" + op: "StridedSlice" + input: "Shape_13" + input: "strided_slice_26/stack" + input: "strided_slice_26/stack_1" + input: "strided_slice_26/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_27/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_27/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_27/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_27" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_27/stack" + input: "strided_slice_27/stack_1" + input: "strided_slice_27/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_32/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 32 + } + } + } +} +node { + name: "Reshape_32/shape" + op: "Pack" + input: "strided_slice_26" + input: "strided_slice_27" + input: "Reshape_32/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_32" + op: "Reshape" + input: "Cast_7" + input: "Reshape_32/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "concat_9/concat" + op: "Identity" + input: "Reshape_32" + attr 
{ + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_descriptor_qm" + op: "Identity" + input: "concat_9/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "fitting_attr_qm/t_bias_atom_e" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 6 + } + } + tensor_content: "\024w(\231N\030\n@bOp\273\211e\341?\024w(\231N\030\n@(m\272a\235\353\027@\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "fitting_attr_qm/t_bias_atom_e/read" + op: "Identity" + input: "fitting_attr_qm/t_bias_atom_e" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@fitting_attr_qm/t_bias_atom_e" + } + } + } +} +node { + name: "strided_slice_30/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_30/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_30/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_30" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_30/stack" + input: "strided_slice_30/stack_1" + input: "strided_slice_30/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_34/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_34/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 32 + } + } + } +} +node { + name: "Reshape_34/shape" + op: "Pack" + input: "Reshape_34/shape/0" + input: "strided_slice_30" + input: "Reshape_34/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_34" + op: "Reshape" + input: "o_descriptor_qm" + input: "Reshape_34/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_31/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_31/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor 
{ + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_31/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_31" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_31/stack" + input: "strided_slice_31/stack_1" + input: "strided_slice_31/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "zeros_1/packed/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 32 + } + } + } +} +node { + name: "zeros_1/packed" + op: "Pack" + input: "strided_slice_15" + input: "strided_slice_31" + input: "zeros_1/packed/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "zeros_1/Const" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 0.0 + } + } + } +} +node { + name: "zeros_1" + op: "Fill" + input: "zeros_1/packed" + input: "zeros_1/Const" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_32/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_32/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_32/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_32" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_32/stack" + input: "strided_slice_32/stack_1" + input: "strided_slice_32/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_35/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_35/shape" + op: "Pack" + input: "Reshape_35/shape/0" + input: "strided_slice_32" + attr { + key: "N" + 
value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_35" + op: "Reshape" + input: "GatherV2_1" + input: "Reshape_35/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_33/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_33/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_33/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_33" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_33/stack" + input: "strided_slice_33/stack_1" + input: "strided_slice_33/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Slice_9/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_9/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_9/size" + op: "Pack" + input: "Slice_9/size/0" + input: "strided_slice_33" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_9" + op: "Slice" + input: "Reshape_35" + input: "Slice_9/begin" + input: "Slice_9/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GreaterEqual/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "GreaterEqual" + op: "GreaterEqual" + input: "Slice_9" + input: "GreaterEqual/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_9" + op: "Cast" + input: "GreaterEqual" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_BOOL + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Reshape_36/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_36" + op: "Reshape" + 
input: "Slice_9" + input: "Reshape_36/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "clip_by_value_2/Minimum/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 5 + } + } + } +} +node { + name: "clip_by_value_2/Minimum" + op: "Minimum" + input: "Reshape_36" + input: "clip_by_value_2/Minimum/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "clip_by_value_2/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "clip_by_value_2" + op: "Maximum" + input: "clip_by_value_2/Minimum" + input: "clip_by_value_2/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "embedding_lookup_2/axis" + op: "Const" + attr { + key: "_class" + value { + list { + s: "loc:@t_typeebd" + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "embedding_lookup_2" + op: "GatherV2" + input: "t_typeebd" + input: "clip_by_value_2" + input: "embedding_lookup_2/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@t_typeebd" + } + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "embedding_lookup_2/Identity" + op: "Identity" + input: "embedding_lookup_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Cast_10" + op: "Cast" + input: "embedding_lookup_2/Identity" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Reshape_37/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377 \000\000\000" + } + } + } +} +node { + name: "Reshape_37" + op: "Reshape" + input: "Reshape_34" + input: "Reshape_37/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "concat_11/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_11" + op: "ConcatV2" + input: "Reshape_37" + input: "Cast_10" + input: "concat_11/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_34/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_34/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + 
dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_34/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_34" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_34/stack" + input: "strided_slice_34/stack_1" + input: "strided_slice_34/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_38/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_38/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 40 + } + } + } +} +node { + name: "Reshape_38/shape" + op: "Pack" + input: "Reshape_38/shape/0" + input: "strided_slice_34" + input: "Reshape_38/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_38" + op: "Reshape" + input: "concat_11" + input: "Reshape_38/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_35/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_35/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_35/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_35" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_35/stack" + input: "strided_slice_35/stack_1" + input: "strided_slice_35/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Cast_11" + op: "Cast" + input: "Reshape_38" + attr { + key: "DstT" + value { + type: DT_FLOAT + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Slice_10/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + 
tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_10/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_10/size/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_10/size" + op: "Pack" + input: "Slice_10/size/0" + input: "strided_slice_35" + input: "Slice_10/size/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_10" + op: "Slice" + input: "Cast_11" + input: "Slice_10/begin" + input: "Slice_10/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Reshape_39/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377(\000\000\000" + } + } + } +} +node { + name: "Reshape_39" + op: "Reshape" + input: "Slice_10" + input: "Reshape_39/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_0_qm/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 40 + } + dim { + size: 2 + } + } + tensor_content: "\2241\000\276\224\223j>\333(%\rr\263=c\366q\275\261\261j\276C^G>\343\347\177\273\304@\'\275\3331r>s\263\211\276Fa\212\275\336\335\262\275\347\253J=\254\3762>h\375\263\2764\223\230=\254jO\276\364\211a=\271\262\210\276\272\030\313\273\016\'\016>j$Z\276\265\024j>\303(\033\275\267\206\347\275\236\237\367\274\023I5<\306\007\310=1Bx\276\022\rR>Xf9\276\005\177\237;B\301\226=X\024\212=M;H\275nq\216\275h\257\216\2769\341\235\245 o>NN\016=\310\2134\275\242\347)=\333\364\211>N\372\021>\373\254\242\276\315\262F\275\321=?>&\221\333=-](\276?\357\t;\247\264(\276>\256\350\274g@\256\276s\200\220>\253nD>7\266T\275\034K\312\273\370DT\275\340PB\275&\\\362\275\310u5\275\221B:\275\357\360\377\275" + } + } + } +} +node { + name: "layer_0_qm/matrix/read" + op: "Identity" + input: "layer_0_qm/matrix" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_0_qm/matrix" + } + } + } +} +node { + name: "layer_0_qm/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\'\255[\277\211WI\276" + } + } + } +} +node { + name: "layer_0_qm/bias/read" + op: "Identity" + input: "layer_0_qm/bias" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_0_qm/bias" + } + } + } +} +node { + name: "layer_0_qm/MatMul" + op: "MatMul" + input: "Reshape_39" + input: "layer_0_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } 
+} +node { + name: "layer_0_qm/BiasAdd" + op: "BiasAdd" + input: "layer_0_qm/MatMul" + input: "layer_0_qm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_0_qm/Tanh" + op: "Tanh" + input: "layer_0_qm/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_0_qm/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "layer_0_qm/Reshape" + op: "Reshape" + input: "layer_0_qm/Tanh" + input: "layer_0_qm/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_1_qm/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + dim { + size: 4 + } + } + tensor_content: "\034\273\367=n\035R\274\367\370\266\274\223\265{<\361\010\206?v\300\207\276\314\024\217\276\351\2426>" + } + } + } +} +node { + name: "layer_1_qm/matrix/read" + op: "Identity" + input: "layer_1_qm/matrix" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_1_qm/matrix" + } + } + } +} +node { + name: "layer_1_qm/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + } + tensor_content: "k\313\226\277\270\2765\277\244\303\337\277o\241\375\276" + } + } + } +} +node { + name: "layer_1_qm/bias/read" + op: "Identity" + input: "layer_1_qm/bias" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_1_qm/bias" + } + } + } +} +node { + name: "layer_1_qm/MatMul" + op: "MatMul" + input: "layer_0_qm/Reshape" + input: "layer_1_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_1_qm/BiasAdd" + op: "BiasAdd" + input: "layer_1_qm/MatMul" + input: "layer_1_qm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_1_qm/Tanh" + op: "Tanh" + input: "layer_1_qm/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_1_qm/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "layer_1_qm/Reshape" + op: "Reshape" + input: "layer_1_qm/Tanh" + input: "layer_1_qm/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_2_qm/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + dim { + size: 8 + } + } + tensor_content: 
"k\t^\274T\360\261\276\317u\254>\010\005\255>H:\275\275\340\022a\275\034\203\322\276\021\223\215>\256@\207\275\315=K\276\006\300\207=,\271\245=@E\035>\020\016\267=\331w\007?\274_\034\276;!\245>\026\0003\275\255UR=\377\340\n>3nI\276\3762\234=\2629\235\276\230s\361\276\235\ni>p\373\251>d\2230\276$\271X>y\2133=}\027&\276\036\020\210>H\215n>" + } + } + } +} +node { + name: "layer_2_qm/matrix/read" + op: "Identity" + input: "layer_2_qm/matrix" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_2_qm/matrix" + } + } + } +} +node { + name: "layer_2_qm/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 8 + } + } + tensor_content: "J\223\376\276n\333\375>\306A\010?\347\234\223\2767\000\265?%\034\345<{\002\023?c\350o?" + } + } + } +} +node { + name: "layer_2_qm/bias/read" + op: "Identity" + input: "layer_2_qm/bias" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_2_qm/bias" + } + } + } +} +node { + name: "layer_2_qm/MatMul" + op: "MatMul" + input: "layer_1_qm/Reshape" + input: "layer_2_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_2_qm/BiasAdd" + op: "BiasAdd" + input: "layer_2_qm/MatMul" + input: "layer_2_qm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_2_qm/Tanh" + op: "Tanh" + input: "layer_2_qm/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_2_qm/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\010\000\000\000" + } + } + } +} +node { + name: "layer_2_qm/Reshape" + op: "Reshape" + input: "layer_2_qm/Tanh" + input: "layer_2_qm/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "final_layer_qm/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 8 + } + dim { + size: 1 + } + } + tensor_content: "\257\255\013?\337\374\252>\302\236\326>\020\244\031>\203\020\276>\327 \313\276V\203\377\275+\330\226\276" + } + } + } +} +node { + name: "final_layer_qm/matrix/read" + op: "Identity" + input: "final_layer_qm/matrix" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@final_layer_qm/matrix" + } + } + } +} +node { + name: "final_layer_qm/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 1 + } + } + float_val: 1.4039429426193237 + } + } + } +} +node { + name: "final_layer_qm/bias/read" + op: "Identity" + input: "final_layer_qm/bias" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@final_layer_qm/bias" + } + } + } +} +node { + name: "final_layer_qm/MatMul" + op: "MatMul" + input: "layer_2_qm/Reshape" + input: 
"final_layer_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "final_layer_qm/BiasAdd" + op: "BiasAdd" + input: "final_layer_qm/MatMul" + input: "final_layer_qm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "Cast_12" + op: "Cast" + input: "final_layer_qm/BiasAdd" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Reshape_40/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377 \000\000\000" + } + } + } +} +node { + name: "Reshape_40" + op: "Reshape" + input: "zeros_1" + input: "Reshape_40/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "concat_12/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_12" + op: "ConcatV2" + input: "Reshape_40" + input: "Cast_10" + input: "concat_12/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_36/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_36/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_36/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_36" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_36/stack" + input: "strided_slice_36/stack_1" + input: "strided_slice_36/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_41/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_41/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 40 + } + } + } +} +node { + name: "Reshape_41/shape" + op: "Pack" + input: "Reshape_41/shape/0" + input: 
"strided_slice_36" + input: "Reshape_41/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_41" + op: "Reshape" + input: "concat_12" + input: "Reshape_41/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_37/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_37/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_37/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_37" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_37/stack" + input: "strided_slice_37/stack_1" + input: "strided_slice_37/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Cast_13" + op: "Cast" + input: "Reshape_41" + attr { + key: "DstT" + value { + type: DT_FLOAT + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Slice_11/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_11/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_11/size/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_11/size" + op: "Pack" + input: "Slice_11/size/0" + input: "strided_slice_37" + input: "Slice_11/size/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_11" + op: "Slice" + input: "Cast_13" + input: "Slice_11/begin" + input: "Slice_11/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Reshape_42/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377(\000\000\000" + } + } + } +} +node { + name: "Reshape_42" + op: "Reshape" + 
input: "Slice_11" + input: "Reshape_42/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_0_qm_1/MatMul" + op: "MatMul" + input: "Reshape_42" + input: "layer_0_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_0_qm_1/BiasAdd" + op: "BiasAdd" + input: "layer_0_qm_1/MatMul" + input: "layer_0_qm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_0_qm_1/Tanh" + op: "Tanh" + input: "layer_0_qm_1/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_0_qm_1/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "layer_0_qm_1/Reshape" + op: "Reshape" + input: "layer_0_qm_1/Tanh" + input: "layer_0_qm_1/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_1_qm_1/MatMul" + op: "MatMul" + input: "layer_0_qm_1/Reshape" + input: "layer_1_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_1_qm_1/BiasAdd" + op: "BiasAdd" + input: "layer_1_qm_1/MatMul" + input: "layer_1_qm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_1_qm_1/Tanh" + op: "Tanh" + input: "layer_1_qm_1/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_1_qm_1/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "layer_1_qm_1/Reshape" + op: "Reshape" + input: "layer_1_qm_1/Tanh" + input: "layer_1_qm_1/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_2_qm_1/MatMul" + op: "MatMul" + input: "layer_1_qm_1/Reshape" + input: "layer_2_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_2_qm_1/BiasAdd" + op: "BiasAdd" + input: "layer_2_qm_1/MatMul" + input: "layer_2_qm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_2_qm_1/Tanh" + op: "Tanh" + input: "layer_2_qm_1/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_2_qm_1/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\010\000\000\000" + } + } + } +} +node { + name: 
"layer_2_qm_1/Reshape" + op: "Reshape" + input: "layer_2_qm_1/Tanh" + input: "layer_2_qm_1/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "final_layer_qm_1/MatMul" + op: "MatMul" + input: "layer_2_qm_1/Reshape" + input: "final_layer_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "final_layer_qm_1/BiasAdd" + op: "BiasAdd" + input: "final_layer_qm_1/MatMul" + input: "final_layer_qm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "Cast_14" + op: "Cast" + input: "final_layer_qm_1/BiasAdd" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "sub_2" + op: "Sub" + input: "Cast_12" + input: "Cast_14" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Shape_15" + op: "Shape" + input: "Reshape_38" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_38/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_38/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_38/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_38" + op: "StridedSlice" + input: "Shape_15" + input: "strided_slice_38/stack" + input: "strided_slice_38/stack_1" + input: "strided_slice_38/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_39/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_39/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_39/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + 
name: "strided_slice_39" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_39/stack" + input: "strided_slice_39/stack_1" + input: "strided_slice_39/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_43/shape" + op: "Pack" + input: "strided_slice_38" + input: "strided_slice_39" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_43" + op: "Reshape" + input: "sub_2" + input: "Reshape_43/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "embedding_lookup_3/axis" + op: "Const" + attr { + key: "_class" + value { + list { + s: "loc:@fitting_attr_qm/t_bias_atom_e" + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "embedding_lookup_3" + op: "GatherV2" + input: "fitting_attr_qm/t_bias_atom_e/read" + input: "clip_by_value_2" + input: "embedding_lookup_3/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@fitting_attr_qm/t_bias_atom_e" + } + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "embedding_lookup_3/Identity" + op: "Identity" + input: "embedding_lookup_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Shape_16" + op: "Shape" + input: "Reshape_38" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_40/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_40/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_40/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_40" + op: "StridedSlice" + input: "Shape_16" + input: "strided_slice_40/stack" + input: "strided_slice_40/stack_1" + input: "strided_slice_40/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: 
"new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_41/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_41/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 8 + } + } + } +} +node { + name: "strided_slice_41/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_41" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_41/stack" + input: "strided_slice_41/stack_1" + input: "strided_slice_41/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "Rank" + op: "Rank" + input: "strided_slice_41" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "range/start" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "range/delta" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "range" + op: "Range" + input: "range/start" + input: "Rank" + input: "range/delta" + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Sum" + op: "Sum" + input: "strided_slice_41" + input: "range" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "Reshape_44/shape" + op: "Pack" + input: "strided_slice_40" + input: "Sum" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_44" + op: "Reshape" + input: "embedding_lookup_3/Identity" + input: "Reshape_44/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "add_6" + op: "AddV2" + input: "Reshape_43" + input: "Reshape_44" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "mul_8" + op: "Mul" + input: "add_6" + input: "Cast_9" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Reshape_45/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_45" + op: "Reshape" + input: "mul_8" + input: "Reshape_45/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: 
"Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_42/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_42/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_42/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_42" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_42/stack" + input: "strided_slice_42/stack_1" + input: "strided_slice_42/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "o_atom_energy_qm/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "o_atom_energy_qm/shape" + op: "Pack" + input: "o_atom_energy_qm/shape/0" + input: "strided_slice_42" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_atom_energy_qm" + op: "Reshape" + input: "Reshape_45" + input: "o_atom_energy_qm/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "o_energy_qm/reduction_indices" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "o_energy_qm" + op: "Sum" + input: "o_atom_energy_qm" + input: "o_energy_qm/reduction_indices" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/Shape" + op: "Shape" + input: "Reshape_45" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/grad_ys_0/Const" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 1.0 + } + } + } +} +node { + name: "gradients/grad_ys_0" + op: "Fill" + input: "gradients/Shape" + input: "gradients/grad_ys_0/Const" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_45_grad/Shape" + op: "Shape" + input: "mul_8" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + 
attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_45_grad/Reshape" + op: "Reshape" + input: "gradients/grad_ys_0" + input: "gradients/Reshape_45_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/mul_8_grad/Shape" + op: "Shape" + input: "add_6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/mul_8_grad/Shape_1" + op: "Shape" + input: "Cast_9" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/mul_8_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/mul_8_grad/Shape" + input: "gradients/mul_8_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/mul_8_grad/Mul" + op: "Mul" + input: "gradients/Reshape_45_grad/Reshape" + input: "Cast_9" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/mul_8_grad/Sum" + op: "Sum" + input: "gradients/mul_8_grad/Mul" + input: "gradients/mul_8_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/mul_8_grad/Reshape" + op: "Reshape" + input: "gradients/mul_8_grad/Sum" + input: "gradients/mul_8_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/add_6_grad/Shape" + op: "Shape" + input: "Reshape_43" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/add_6_grad/Shape_1" + op: "Shape" + input: "Reshape_44" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/add_6_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/add_6_grad/Shape" + input: "gradients/add_6_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/add_6_grad/Sum" + op: "Sum" + input: "gradients/mul_8_grad/Reshape" + input: "gradients/add_6_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/add_6_grad/Reshape" + op: "Reshape" + input: "gradients/add_6_grad/Sum" + input: "gradients/add_6_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: 
"gradients/Reshape_43_grad/Shape" + op: "Shape" + input: "sub_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_43_grad/Reshape" + op: "Reshape" + input: "gradients/add_6_grad/Reshape" + input: "gradients/Reshape_43_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/sub_2_grad/Shape" + op: "Shape" + input: "Cast_12" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/sub_2_grad/Shape_1" + op: "Shape" + input: "Cast_14" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/sub_2_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/sub_2_grad/Shape" + input: "gradients/sub_2_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/sub_2_grad/Sum" + op: "Sum" + input: "gradients/Reshape_43_grad/Reshape" + input: "gradients/sub_2_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/sub_2_grad/Reshape" + op: "Reshape" + input: "gradients/sub_2_grad/Sum" + input: "gradients/sub_2_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Cast_12_grad/Cast" + op: "Cast" + input: "gradients/sub_2_grad/Reshape" + attr { + key: "DstT" + value { + type: DT_FLOAT + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "gradients/final_layer_qm/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients/Cast_12_grad/Cast" + input: "final_layer_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/layer_2_qm/Reshape_grad/Shape" + op: "Shape" + input: "layer_2_qm/Tanh" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/layer_2_qm/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients/final_layer_qm/MatMul_grad/MatMul" + input: "gradients/layer_2_qm/Reshape_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/layer_2_qm/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "layer_2_qm/Tanh" + input: "gradients/layer_2_qm/Reshape_grad/Reshape" + attr { + key: "T" + value { + 
type: DT_FLOAT + } + } +} +node { + name: "gradients/layer_2_qm/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients/layer_2_qm/Tanh_grad/TanhGrad" + input: "layer_2_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/layer_1_qm/Reshape_grad/Shape" + op: "Shape" + input: "layer_1_qm/Tanh" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/layer_1_qm/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients/layer_2_qm/MatMul_grad/MatMul" + input: "gradients/layer_1_qm/Reshape_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/layer_1_qm/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "layer_1_qm/Tanh" + input: "gradients/layer_1_qm/Reshape_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/layer_1_qm/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients/layer_1_qm/Tanh_grad/TanhGrad" + input: "layer_1_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/layer_0_qm/Reshape_grad/Shape" + op: "Shape" + input: "layer_0_qm/Tanh" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/layer_0_qm/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients/layer_1_qm/MatMul_grad/MatMul" + input: "gradients/layer_0_qm/Reshape_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/layer_0_qm/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "layer_0_qm/Tanh" + input: "gradients/layer_0_qm/Reshape_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/layer_0_qm/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients/layer_0_qm/Tanh_grad/TanhGrad" + input: "layer_0_qm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/Reshape_39_grad/Shape" + op: "Shape" + input: "Slice_10" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_39_grad/Reshape" + op: "Reshape" + input: "gradients/layer_0_qm/MatMul_grad/MatMul" + input: "gradients/Reshape_39_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_10_grad/Rank" + op: 
"Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "gradients/Slice_10_grad/Shape" + op: "Shape" + input: "Slice_10" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_10_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_10_grad/stack" + op: "Pack" + input: "gradients/Slice_10_grad/Rank" + input: "gradients/Slice_10_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/Slice_10_grad/Reshape" + op: "Reshape" + input: "Slice_10/begin" + input: "gradients/Slice_10_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_10_grad/Shape_1" + op: "Shape" + input: "Cast_11" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_10_grad/sub" + op: "Sub" + input: "gradients/Slice_10_grad/Shape_1" + input: "gradients/Slice_10_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_10_grad/sub_1" + op: "Sub" + input: "gradients/Slice_10_grad/sub" + input: "Slice_10/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_10_grad/Reshape_1" + op: "Reshape" + input: "gradients/Slice_10_grad/sub_1" + input: "gradients/Slice_10_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_10_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_10_grad/concat" + op: "ConcatV2" + input: "gradients/Slice_10_grad/Reshape" + input: "gradients/Slice_10_grad/Reshape_1" + input: "gradients/Slice_10_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_10_grad/Pad" + op: "Pad" + input: "gradients/Reshape_39_grad/Reshape" + input: "gradients/Slice_10_grad/concat" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Cast_11_grad/Cast" + op: "Cast" + input: "gradients/Slice_10_grad/Pad" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "gradients/Reshape_38_grad/Shape" + op: "Shape" + input: "concat_11" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { 
+ key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_38_grad/Reshape" + op: "Reshape" + input: "gradients/Cast_11_grad/Cast" + input: "gradients/Reshape_38_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/concat_11_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/concat_11_grad/mod" + op: "FloorMod" + input: "concat_11/axis" + input: "gradients/concat_11_grad/Rank" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/concat_11_grad/ShapeN" + op: "ShapeN" + input: "Reshape_37" + input: "Cast_10" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/concat_11_grad/ConcatOffset" + op: "ConcatOffset" + input: "gradients/concat_11_grad/mod" + input: "gradients/concat_11_grad/ShapeN" + input: "gradients/concat_11_grad/ShapeN:1" + attr { + key: "N" + value { + i: 2 + } + } +} +node { + name: "gradients/concat_11_grad/Slice" + op: "Slice" + input: "gradients/Reshape_38_grad/Reshape" + input: "gradients/concat_11_grad/ConcatOffset" + input: "gradients/concat_11_grad/ShapeN" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients/Reshape_37_grad/Shape" + op: "Shape" + input: "Reshape_34" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_37_grad/Reshape" + op: "Reshape" + input: "gradients/concat_11_grad/Slice" + input: "gradients/Reshape_37_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_34_grad/Shape" + op: "Shape" + input: "o_descriptor_qm" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_34_grad/Reshape" + op: "Reshape" + input: "gradients/Reshape_37_grad/Reshape" + input: "gradients/Reshape_34_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_32_grad/Shape" + op: "Shape" + input: "Cast_7" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_32_grad/Reshape" + op: "Reshape" + input: "gradients/Reshape_34_grad/Reshape" + input: "gradients/Reshape_32_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: 
"gradients/Cast_7_grad/Cast" + op: "Cast" + input: "gradients/Reshape_32_grad/Reshape" + attr { + key: "DstT" + value { + type: DT_FLOAT + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "gradients/Reshape_31_grad/Shape" + op: "Shape" + input: "MatMul_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_31_grad/Reshape" + op: "Reshape" + input: "gradients/Cast_7_grad/Cast" + input: "gradients/Reshape_31_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/MatMul_1_grad/MatMul" + op: "BatchMatMulV2" + input: "Slice_7" + input: "gradients/Reshape_31_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: true + } + } +} +node { + name: "gradients/MatMul_1_grad/MatMul_1" + op: "BatchMatMulV2" + input: "truediv" + input: "gradients/Reshape_31_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: false + } + } +} +node { + name: "gradients/MatMul_1_grad/Shape" + op: "Shape" + input: "truediv" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/MatMul_1_grad/Shape_1" + op: "Shape" + input: "Slice_7" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/MatMul_1_grad/strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/MatMul_1_grad/strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/MatMul_1_grad/strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/MatMul_1_grad/strided_slice" + op: "StridedSlice" + input: "gradients/MatMul_1_grad/Shape" + input: "gradients/MatMul_1_grad/strided_slice/stack" + input: "gradients/MatMul_1_grad/strided_slice/stack_1" + input: "gradients/MatMul_1_grad/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + 
key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/MatMul_1_grad/strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/MatMul_1_grad/strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/MatMul_1_grad/strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/MatMul_1_grad/strided_slice_1" + op: "StridedSlice" + input: "gradients/MatMul_1_grad/Shape_1" + input: "gradients/MatMul_1_grad/strided_slice_1/stack" + input: "gradients/MatMul_1_grad/strided_slice_1/stack_1" + input: "gradients/MatMul_1_grad/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/MatMul_1_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/MatMul_1_grad/strided_slice" + input: "gradients/MatMul_1_grad/strided_slice_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/MatMul_1_grad/Sum" + op: "Sum" + input: "gradients/MatMul_1_grad/MatMul" + input: "gradients/MatMul_1_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/MatMul_1_grad/Reshape" + op: "Reshape" + input: "gradients/MatMul_1_grad/Sum" + input: "gradients/MatMul_1_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/MatMul_1_grad/Sum_1" + op: "Sum" + input: "gradients/MatMul_1_grad/MatMul_1" + input: "gradients/MatMul_1_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/MatMul_1_grad/Reshape_1" + op: "Reshape" + input: "gradients/MatMul_1_grad/Sum_1" + input: "gradients/MatMul_1_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_7_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "gradients/Slice_7_grad/Shape" + op: "Shape" + input: "Slice_7" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: 
TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_7_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_7_grad/stack" + op: "Pack" + input: "gradients/Slice_7_grad/Rank" + input: "gradients/Slice_7_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/Slice_7_grad/Reshape" + op: "Reshape" + input: "Slice_7/begin" + input: "gradients/Slice_7_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_7_grad/Shape_1" + op: "Shape" + input: "truediv" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_7_grad/sub" + op: "Sub" + input: "gradients/Slice_7_grad/Shape_1" + input: "gradients/Slice_7_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_7_grad/sub_1" + op: "Sub" + input: "gradients/Slice_7_grad/sub" + input: "Slice_7/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_7_grad/Reshape_1" + op: "Reshape" + input: "gradients/Slice_7_grad/sub_1" + input: "gradients/Slice_7_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_7_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_7_grad/concat" + op: "ConcatV2" + input: "gradients/Slice_7_grad/Reshape" + input: "gradients/Slice_7_grad/Reshape_1" + input: "gradients/Slice_7_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_7_grad/Pad" + op: "Pad" + input: "gradients/MatMul_1_grad/Reshape_1" + input: "gradients/Slice_7_grad/concat" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/AddN" + op: "AddN" + input: "gradients/MatMul_1_grad/Reshape" + input: "gradients/Slice_7_grad/Pad" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/MatMul_1_grad/Reshape" + } + } + } +} +node { + name: "gradients/truediv_grad/Shape" + op: "Shape" + input: "MatMul" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/truediv_grad/Shape_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + } + } + } + } + } +} +node { + name: 
"gradients/truediv_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/truediv_grad/Shape" + input: "gradients/truediv_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/truediv_grad/RealDiv" + op: "RealDiv" + input: "gradients/AddN" + input: "truediv/y" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/truediv_grad/Sum" + op: "Sum" + input: "gradients/truediv_grad/RealDiv" + input: "gradients/truediv_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/truediv_grad/Reshape" + op: "Reshape" + input: "gradients/truediv_grad/Sum" + input: "gradients/truediv_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/MatMul_grad/MatMul" + op: "BatchMatMulV2" + input: "Reshape_29" + input: "gradients/truediv_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: true + } + } +} +node { + name: "gradients/MatMul_grad/MatMul_1" + op: "BatchMatMulV2" + input: "Reshape_30" + input: "gradients/truediv_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: false + } + } +} +node { + name: "gradients/MatMul_grad/Shape" + op: "Shape" + input: "Reshape_30" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/MatMul_grad/Shape_1" + op: "Shape" + input: "Reshape_29" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/MatMul_grad/strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/MatMul_grad/strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/MatMul_grad/strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/MatMul_grad/strided_slice" + op: "StridedSlice" + input: "gradients/MatMul_grad/Shape" + input: "gradients/MatMul_grad/strided_slice/stack" + input: "gradients/MatMul_grad/strided_slice/stack_1" + input: "gradients/MatMul_grad/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: 
"end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/MatMul_grad/strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients/MatMul_grad/strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients/MatMul_grad/strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients/MatMul_grad/strided_slice_1" + op: "StridedSlice" + input: "gradients/MatMul_grad/Shape_1" + input: "gradients/MatMul_grad/strided_slice_1/stack" + input: "gradients/MatMul_grad/strided_slice_1/stack_1" + input: "gradients/MatMul_grad/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients/MatMul_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/MatMul_grad/strided_slice" + input: "gradients/MatMul_grad/strided_slice_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/MatMul_grad/Sum" + op: "Sum" + input: "gradients/MatMul_grad/MatMul" + input: "gradients/MatMul_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/MatMul_grad/Reshape" + op: "Reshape" + input: "gradients/MatMul_grad/Sum" + input: "gradients/MatMul_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/MatMul_grad/Sum_1" + op: "Sum" + input: "gradients/MatMul_grad/MatMul_1" + input: "gradients/MatMul_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/MatMul_grad/Reshape_1" + op: "Reshape" + input: "gradients/MatMul_grad/Sum_1" + input: "gradients/MatMul_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_30_grad/Shape" + op: "Shape" + input: "Slice_4" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_30_grad/Reshape" + op: "Reshape" + input: "gradients/MatMul_grad/Reshape" + input: "gradients/Reshape_30_grad/Shape" + 
attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_29_grad/Shape" + op: "Shape" + input: "filter_type_all_qm/add_6" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_29_grad/Reshape" + op: "Reshape" + input: "gradients/MatMul_grad/Reshape_1" + input: "gradients/Reshape_29_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/add_6_grad/Shape" + op: "Shape" + input: "filter_type_all_qm/mul_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_6_grad/Shape_1" + op: "Shape" + input: "filter_type_all_qm/add_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_6_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/filter_type_all_qm/add_6_grad/Shape" + input: "gradients/filter_type_all_qm/add_6_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/add_6_grad/Sum" + op: "Sum" + input: "gradients/Reshape_29_grad/Reshape" + input: "gradients/filter_type_all_qm/add_6_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all_qm/add_6_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_6_grad/Sum" + input: "gradients/filter_type_all_qm/add_6_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/add_6_grad/Sum_1" + op: "Sum" + input: "gradients/Reshape_29_grad/Reshape" + input: "gradients/filter_type_all_qm/add_6_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all_qm/add_6_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_6_grad/Sum_1" + input: "gradients/filter_type_all_qm/add_6_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/mul_2_grad/Shape" + op: "Shape" + input: "filter_type_all_qm/add_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/mul_2_grad/Shape_1" + op: "Shape" + input: 
"filter_type_all_qm/mul_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/mul_2_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/filter_type_all_qm/mul_2_grad/Shape" + input: "gradients/filter_type_all_qm/mul_2_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/mul_2_grad/Mul" + op: "Mul" + input: "gradients/filter_type_all_qm/add_6_grad/Reshape" + input: "filter_type_all_qm/mul_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/filter_type_all_qm/mul_2_grad/Sum" + op: "Sum" + input: "gradients/filter_type_all_qm/mul_2_grad/Mul" + input: "gradients/filter_type_all_qm/mul_2_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all_qm/mul_2_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all_qm/mul_2_grad/Sum" + input: "gradients/filter_type_all_qm/mul_2_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/AddN_1" + op: "AddN" + input: "gradients/filter_type_all_qm/add_6_grad/Reshape_1" + input: "gradients/filter_type_all_qm/mul_2_grad/Reshape" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/filter_type_all_qm/add_6_grad/Reshape_1" + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_2_grad/Shape" + op: "Shape" + input: "filter_type_all_qm/concat_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_2_grad/Shape_1" + op: "Shape" + input: "filter_type_all_qm/Reshape_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_2_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/filter_type_all_qm/add_2_grad/Shape" + input: "gradients/filter_type_all_qm/add_2_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/add_2_grad/Sum" + op: "Sum" + input: "gradients/AddN_1" + input: "gradients/filter_type_all_qm/add_2_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all_qm/add_2_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_2_grad/Sum" + input: "gradients/filter_type_all_qm/add_2_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: 
"gradients/filter_type_all_qm/add_2_grad/Sum_1" + op: "Sum" + input: "gradients/AddN_1" + input: "gradients/filter_type_all_qm/add_2_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all_qm/add_2_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_2_grad/Sum_1" + input: "gradients/filter_type_all_qm/add_2_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_2_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_2_grad/mod" + op: "FloorMod" + input: "filter_type_all_qm/concat_2/axis" + input: "gradients/filter_type_all_qm/concat_2_grad/Rank" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_2_grad/ShapeN" + op: "ShapeN" + input: "filter_type_all_qm/add_1" + input: "filter_type_all_qm/add_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_2_grad/ConcatOffset" + op: "ConcatOffset" + input: "gradients/filter_type_all_qm/concat_2_grad/mod" + input: "gradients/filter_type_all_qm/concat_2_grad/ShapeN" + input: "gradients/filter_type_all_qm/concat_2_grad/ShapeN:1" + attr { + key: "N" + value { + i: 2 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_2_grad/Slice" + op: "Slice" + input: "gradients/filter_type_all_qm/add_2_grad/Reshape" + input: "gradients/filter_type_all_qm/concat_2_grad/ConcatOffset" + input: "gradients/filter_type_all_qm/concat_2_grad/ShapeN" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_2_grad/Slice_1" + op: "Slice" + input: "gradients/filter_type_all_qm/add_2_grad/Reshape" + input: "gradients/filter_type_all_qm/concat_2_grad/ConcatOffset:1" + input: "gradients/filter_type_all_qm/concat_2_grad/ShapeN:1" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/filter_type_all_qm/Reshape_2_grad/Shape" + op: "Shape" + input: "filter_type_all_qm/Tanh_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/Reshape_2_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_2_grad/Reshape_1" + input: "gradients/filter_type_all_qm/Reshape_2_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all_qm/Tanh_2_grad/TanhGrad" + op: "TanhGrad" + input: "filter_type_all_qm/Tanh_2" + input: "gradients/filter_type_all_qm/Reshape_2_grad/Reshape" + attr { + key: "T" + value { + 
type: DT_FLOAT + } + } +} +node { + name: "gradients/filter_type_all_qm/MatMul_2_grad/MatMul" + op: "MatMul" + input: "gradients/filter_type_all_qm/Tanh_2_grad/TanhGrad" + input: "filter_type_all_qm/matrix_3/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/AddN_2" + op: "AddN" + input: "gradients/filter_type_all_qm/concat_2_grad/Slice" + input: "gradients/filter_type_all_qm/concat_2_grad/Slice_1" + input: "gradients/filter_type_all_qm/MatMul_2_grad/MatMul" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/filter_type_all_qm/concat_2_grad/Slice" + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_1_grad/Shape" + op: "Shape" + input: "filter_type_all_qm/concat_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_1_grad/Shape_1" + op: "Shape" + input: "filter_type_all_qm/Reshape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_1_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/filter_type_all_qm/add_1_grad/Shape" + input: "gradients/filter_type_all_qm/add_1_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/add_1_grad/Sum" + op: "Sum" + input: "gradients/AddN_2" + input: "gradients/filter_type_all_qm/add_1_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all_qm/add_1_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_1_grad/Sum" + input: "gradients/filter_type_all_qm/add_1_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/add_1_grad/Sum_1" + op: "Sum" + input: "gradients/AddN_2" + input: "gradients/filter_type_all_qm/add_1_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all_qm/add_1_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_1_grad/Sum_1" + input: "gradients/filter_type_all_qm/add_1_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_1_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_1_grad/mod" + op: "FloorMod" + input: 
"filter_type_all_qm/concat_1/axis" + input: "gradients/filter_type_all_qm/concat_1_grad/Rank" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_1_grad/ShapeN" + op: "ShapeN" + input: "filter_type_all_qm/add" + input: "filter_type_all_qm/add" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_1_grad/ConcatOffset" + op: "ConcatOffset" + input: "gradients/filter_type_all_qm/concat_1_grad/mod" + input: "gradients/filter_type_all_qm/concat_1_grad/ShapeN" + input: "gradients/filter_type_all_qm/concat_1_grad/ShapeN:1" + attr { + key: "N" + value { + i: 2 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_1_grad/Slice" + op: "Slice" + input: "gradients/filter_type_all_qm/add_1_grad/Reshape" + input: "gradients/filter_type_all_qm/concat_1_grad/ConcatOffset" + input: "gradients/filter_type_all_qm/concat_1_grad/ShapeN" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_1_grad/Slice_1" + op: "Slice" + input: "gradients/filter_type_all_qm/add_1_grad/Reshape" + input: "gradients/filter_type_all_qm/concat_1_grad/ConcatOffset:1" + input: "gradients/filter_type_all_qm/concat_1_grad/ShapeN:1" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/filter_type_all_qm/Reshape_1_grad/Shape" + op: "Shape" + input: "filter_type_all_qm/Tanh_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/Reshape_1_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_1_grad/Reshape_1" + input: "gradients/filter_type_all_qm/Reshape_1_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all_qm/Tanh_1_grad/TanhGrad" + op: "TanhGrad" + input: "filter_type_all_qm/Tanh_1" + input: "gradients/filter_type_all_qm/Reshape_1_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/filter_type_all_qm/MatMul_1_grad/MatMul" + op: "MatMul" + input: "gradients/filter_type_all_qm/Tanh_1_grad/TanhGrad" + input: "filter_type_all_qm/matrix_2/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/AddN_3" + op: "AddN" + input: "gradients/filter_type_all_qm/concat_1_grad/Slice" + input: "gradients/filter_type_all_qm/concat_1_grad/Slice_1" + input: "gradients/filter_type_all_qm/MatMul_1_grad/MatMul" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/filter_type_all_qm/concat_1_grad/Slice" + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_grad/Shape" + op: "Shape" + input: "filter_type_all_qm/concat" + attr { + key: "T" + value { + type: DT_FLOAT 
+ } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_grad/Shape_1" + op: "Shape" + input: "filter_type_all_qm/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/add_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients/filter_type_all_qm/add_grad/Shape" + input: "gradients/filter_type_all_qm/add_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/add_grad/Sum" + op: "Sum" + input: "gradients/AddN_3" + input: "gradients/filter_type_all_qm/add_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all_qm/add_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_grad/Sum" + input: "gradients/filter_type_all_qm/add_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/add_grad/Sum_1" + op: "Sum" + input: "gradients/AddN_3" + input: "gradients/filter_type_all_qm/add_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients/filter_type_all_qm/add_grad/Reshape_1" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_grad/Sum_1" + input: "gradients/filter_type_all_qm/add_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_grad/mod" + op: "FloorMod" + input: "filter_type_all_qm/concat/axis" + input: "gradients/filter_type_all_qm/concat_grad/Rank" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_grad/ShapeN" + op: "ShapeN" + input: "Reshape_27" + input: "Reshape_27" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_grad/ConcatOffset" + op: "ConcatOffset" + input: "gradients/filter_type_all_qm/concat_grad/mod" + input: "gradients/filter_type_all_qm/concat_grad/ShapeN" + input: "gradients/filter_type_all_qm/concat_grad/ShapeN:1" + attr { + key: "N" + value { + i: 2 + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_grad/Slice" + op: "Slice" + input: "gradients/filter_type_all_qm/add_grad/Reshape" + input: "gradients/filter_type_all_qm/concat_grad/ConcatOffset" + input: "gradients/filter_type_all_qm/concat_grad/ShapeN" + attr { + key: "Index" + value { + type: DT_INT32 + } + 
} + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/filter_type_all_qm/concat_grad/Slice_1" + op: "Slice" + input: "gradients/filter_type_all_qm/add_grad/Reshape" + input: "gradients/filter_type_all_qm/concat_grad/ConcatOffset:1" + input: "gradients/filter_type_all_qm/concat_grad/ShapeN:1" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/filter_type_all_qm/Reshape_grad/Shape" + op: "Shape" + input: "filter_type_all_qm/Tanh" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/filter_type_all_qm/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients/filter_type_all_qm/add_grad/Reshape_1" + input: "gradients/filter_type_all_qm/Reshape_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients/filter_type_all_qm/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "filter_type_all_qm/Tanh" + input: "gradients/filter_type_all_qm/Reshape_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients/filter_type_all_qm/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients/filter_type_all_qm/Tanh_grad/TanhGrad" + input: "filter_type_all_qm/matrix_1/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients/AddN_4" + op: "AddN" + input: "gradients/filter_type_all_qm/concat_grad/Slice" + input: "gradients/filter_type_all_qm/concat_grad/Slice_1" + input: "gradients/filter_type_all_qm/MatMul_grad/MatMul" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/filter_type_all_qm/concat_grad/Slice" + } + } + } +} +node { + name: "gradients/Reshape_27_grad/Shape" + op: "Shape" + input: "Slice_5" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_27_grad/Reshape" + op: "Reshape" + input: "gradients/AddN_4" + input: "gradients/Reshape_27_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_5_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/Slice_5_grad/Shape" + op: "Shape" + input: "Slice_5" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_5_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: 
DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_5_grad/stack" + op: "Pack" + input: "gradients/Slice_5_grad/Rank" + input: "gradients/Slice_5_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/Slice_5_grad/Reshape" + op: "Reshape" + input: "Slice_5/begin" + input: "gradients/Slice_5_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_5_grad/Shape_1" + op: "Shape" + input: "Reshape_26" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_5_grad/sub" + op: "Sub" + input: "gradients/Slice_5_grad/Shape_1" + input: "gradients/Slice_5_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_5_grad/sub_1" + op: "Sub" + input: "gradients/Slice_5_grad/sub" + input: "Slice_5/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_5_grad/Reshape_1" + op: "Reshape" + input: "gradients/Slice_5_grad/sub_1" + input: "gradients/Slice_5_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_5_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_5_grad/concat" + op: "ConcatV2" + input: "gradients/Slice_5_grad/Reshape" + input: "gradients/Slice_5_grad/Reshape_1" + input: "gradients/Slice_5_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_5_grad/Pad" + op: "Pad" + input: "gradients/Reshape_27_grad/Reshape" + input: "gradients/Slice_5_grad/concat" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_26_grad/Shape" + op: "Shape" + input: "Slice_4" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_26_grad/Reshape" + op: "Reshape" + input: "gradients/Slice_5_grad/Pad" + input: "gradients/Reshape_26_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/AddN_5" + op: "AddN" + input: "gradients/Reshape_30_grad/Reshape" + input: "gradients/Reshape_26_grad/Reshape" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients/Reshape_30_grad/Reshape" + } + } + } +} +node { + name: "gradients/Slice_4_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + 
tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients/Slice_4_grad/Shape" + op: "Shape" + input: "Slice_4" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_4_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_4_grad/stack" + op: "Pack" + input: "gradients/Slice_4_grad/Rank" + input: "gradients/Slice_4_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients/Slice_4_grad/Reshape" + op: "Reshape" + input: "Slice_4/begin" + input: "gradients/Slice_4_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/Shape_1" + op: "Shape" + input: "Cast_5" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Slice_4_grad/sub" + op: "Sub" + input: "gradients/Slice_4_grad/Shape_1" + input: "gradients/Slice_4_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/sub_1" + op: "Sub" + input: "gradients/Slice_4_grad/sub" + input: "Slice_4/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/Reshape_1" + op: "Reshape" + input: "gradients/Slice_4_grad/sub_1" + input: "gradients/Slice_4_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients/Slice_4_grad/concat" + op: "ConcatV2" + input: "gradients/Slice_4_grad/Reshape" + input: "gradients/Slice_4_grad/Reshape_1" + input: "gradients/Slice_4_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Slice_4_grad/Pad" + op: "Pad" + input: "gradients/AddN_5" + input: "gradients/Slice_4_grad/concat" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Cast_5_grad/Cast" + op: "Cast" + input: "gradients/Slice_4_grad/Pad" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "gradients/Reshape_24_grad/Shape" + op: "Shape" + input: "Reshape_23" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } 
+} +node { + name: "gradients/Reshape_24_grad/Reshape" + op: "Reshape" + input: "gradients/Cast_5_grad/Cast" + input: "gradients/Reshape_24_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients/Reshape_23_grad/Shape" + op: "Shape" + input: "o_rmat_qm" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients/Reshape_23_grad/Reshape" + op: "Reshape" + input: "gradients/Reshape_24_grad/Reshape" + input: "gradients/Reshape_23_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_43/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_43/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_43/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_43" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_43/stack" + input: "strided_slice_43/stack_1" + input: "strided_slice_43/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_10/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 96 + } + } + } +} +node { + name: "mul_10" + op: "Mul" + input: "strided_slice_43" + input: "mul_10/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_46/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_46/shape" + op: "Pack" + input: "Reshape_46/shape/0" + input: "mul_10" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_46" + op: "Reshape" + input: "gradients/Reshape_23_grad/Reshape" + input: "Reshape_46/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "ProdForceSeA" + op: "ProdForceSeA" + input: "Reshape_46" + input: "o_rmat_deriv_qm" + input: "o_nlist_qm" + input: "DprcPairwiseIdx:4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "n_a_sel" + value { + i: 24 + } + } + attr { + key: 
"n_r_sel" + value { + i: 0 + } + } +} +node { + name: "ProdVirialSeA" + op: "ProdVirialSeA" + input: "Reshape_46" + input: "o_rmat_deriv_qm" + input: "o_rij_qm" + input: "o_nlist_qm" + input: "DprcPairwiseIdx:4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "n_a_sel" + value { + i: 24 + } + } + attr { + key: "n_r_sel" + value { + i: 0 + } + } +} +node { + name: "strided_slice_44/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_44/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_44/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_44" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_44/stack" + input: "strided_slice_44/stack_1" + input: "strided_slice_44/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_11/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_11" + op: "Mul" + input: "mul_11/x" + input: "strided_slice_44" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_47/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_47/shape" + op: "Pack" + input: "Reshape_47/shape/0" + input: "mul_11" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_47" + op: "Reshape" + input: "ProdForceSeA" + input: "Reshape_47/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_45/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_45/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_45/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_45" + op: "StridedSlice" 
+ input: "DprcPairwiseIdx:4" + input: "strided_slice_45/stack" + input: "strided_slice_45/stack_1" + input: "strided_slice_45/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_12/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_12" + op: "Mul" + input: "mul_12/x" + input: "strided_slice_45" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "o_force_qm/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "o_force_qm/shape" + op: "Pack" + input: "o_force_qm/shape/0" + input: "mul_12" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_force_qm" + op: "Reshape" + input: "Reshape_47" + input: "o_force_qm/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "o_virial_qm/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\t\000\000\000" + } + } + } +} +node { + name: "o_virial_qm" + op: "Reshape" + input: "ProdVirialSeA" + input: "o_virial_qm/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_46/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_46/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_46/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_46" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_46/stack" + input: "strided_slice_46/stack_1" + input: "strided_slice_46/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_13/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + 
dtype: DT_INT32 + tensor_shape { + } + int_val: 9 + } + } + } +} +node { + name: "mul_13" + op: "Mul" + input: "mul_13/x" + input: "strided_slice_46" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "o_atom_virial_qm/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "o_atom_virial_qm/shape" + op: "Pack" + input: "o_atom_virial_qm/shape/0" + input: "mul_13" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_atom_virial_qm" + op: "Reshape" + input: "ProdVirialSeA:1" + input: "o_atom_virial_qm/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_47/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_47/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_47/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_47" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_47/stack" + input: "strided_slice_47/stack_1" + input: "strided_slice_47/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_14/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_14" + op: "Mul" + input: "strided_slice_47" + input: "mul_14/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_48/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_48/shape" + op: "Pack" + input: "Reshape_48/shape/0" + input: "mul_14" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_48" + op: "Reshape" + input: "GatherV2_3" + input: "Reshape_48/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_48/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} 
+node { + name: "strided_slice_48/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_48/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_48" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_48/stack" + input: "strided_slice_48/stack_1" + input: "strided_slice_48/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_49/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_49/shape" + op: "Pack" + input: "Reshape_49/shape/0" + input: "strided_slice_48" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_49" + op: "Reshape" + input: "GatherV2_5" + input: "Reshape_49/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_17" + op: "Shape" + input: "Reshape_48" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_49/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_49/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_49/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_49" + op: "StridedSlice" + input: "Shape_17" + input: "strided_slice_49/stack" + input: "strided_slice_49/stack_1" + input: "strided_slice_49/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "descrpt_attr_qmmm/sel" + op: "Const" + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } + 
attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 27 + } + } + } +} +node { + name: "descrpt_attr_qmmm/original_sel" + op: "Const" + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 27 + } + } + } +} +node { + name: "descrpt_attr_qmmm/t_avg" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 6 + } + dim { + size: 108 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\00
0\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\0
00\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000
[Added .pbtxt test fixture, continued: text-format GraphDef nodes for the QM/MM (qmmm) descriptor. The span defines the descrpt_attr_qmmm/t_avg and descrpt_attr_qmmm/t_std DT_DOUBLE constants (shape 6 x 108) and their Identity reads; StridedSlice/Reshape/Pack ops over DprcPairwiseIdx:5 that assemble the inputs of the ProdEnvMatAMix_1 op (rcut_a: -1.0, rcut_r: 6.0, rcut_r_smth: 0.5, sel_a: [27], sel_r: [0]); the o_rmat_qmmm, o_rmat_deriv_qmmm, o_rij_qmmm and o_nlist_qmmm output identities; GatherV2 embedding lookups of the per-type avg/std slices, the smooth-cutoff arithmetic (Mul/Sub/Neg/AddV2 with clip_by_value_3 and clip_by_value_4) and casts to DT_FLOAT; and the filter_type_all_qmmm embedding network (matrix_1/bias_1 of shape 1x2, matrix_2/bias_2 of shape 2x4, matrix_3/bias_3 of shape 4x8, each applied as MatMul + BiasAdd + Tanh with ConcatV2/AddV2 skip connections), followed by the reshapes that feed it. The constant weights appear only as raw tensor_content byte strings.]
+ value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 7 + } + } + } +} +node { + name: "filter_type_all_qmmm/mul" + op: "Mul" + input: "filter_type_all_qmmm/Reshape_3" + input: "filter_type_all_qmmm/mul/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/Tile/multiples" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\001\000\000\000\033\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qmmm/Tile" + op: "Tile" + input: "filter_type_all_qmmm/mul" + input: "filter_type_all_qmmm/Tile/multiples" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tmultiples" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_4/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\033\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_4" + op: "Reshape" + input: "Reshape_53" + input: "filter_type_all_qmmm/Reshape_4/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/add_3" + op: "AddV2" + input: "filter_type_all_qmmm/Tile" + input: "filter_type_all_qmmm/Reshape_4" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_5/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_5" + op: "Reshape" + input: "filter_type_all_qmmm/add_3" + input: "filter_type_all_qmmm/Reshape_5/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_6/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\001\000\000\000\007\000\000\000\377\377\377\377" + } + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_6" + op: "Reshape" + input: "t_typeebd" + input: "filter_type_all_qmmm/Reshape_6/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/Tile_1/multiples" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\007\000\000\000\001\000\000\000\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qmmm/Tile_1" + op: "Tile" + input: "filter_type_all_qmmm/Reshape_6" + input: "filter_type_all_qmmm/Tile_1/multiples" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tmultiples" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_7/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: 
"\007\000\000\000\001\000\000\000\377\377\377\377" + } + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_7" + op: "Reshape" + input: "t_typeebd" + input: "filter_type_all_qmmm/Reshape_7/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/Tile_2/multiples" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\001\000\000\000\007\000\000\000\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qmmm/Tile_2" + op: "Tile" + input: "filter_type_all_qmmm/Reshape_7" + input: "filter_type_all_qmmm/Tile_2/multiples" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tmultiples" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/concat_3/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "filter_type_all_qmmm/concat_3" + op: "ConcatV2" + input: "filter_type_all_qmmm/Tile_1" + input: "filter_type_all_qmmm/Tile_2" + input: "filter_type_all_qmmm/concat_3/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_8/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\020\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_8" + op: "Reshape" + input: "filter_type_all_qmmm/concat_3" + input: "filter_type_all_qmmm/Reshape_8/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/matrix_1_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 16 + } + dim { + size: 2 + } + } + tensor_content: "\n\007\217=\257\236\362\273IGS\274\rS\021<,\305\032?\260\300\034\276J7%\276\352\343\322=\303\311\263>\302\243g\275+a@<\000\207\311=\365\234\337<@\356S>C\221\206=\343D\277\276\217\003\212>1e\200>\251l\210>\210\337\237>\361\327\376>\230lP\276\023D\336\276\361@\345=U\003o\276\'\017\037\276\326\303\000\276\002\374F>\255\256\211>\364D\014?\346\232%<\375\251\204;" + } + } + } +} +node { + name: "filter_type_all_qmmm/matrix_1_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qmmm/matrix_1_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qmmm/matrix_1_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qmmm/bias_1_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "k\313\226\277\270\2765\277" + } + } + } +} +node { + name: "filter_type_all_qmmm/bias_1_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qmmm/bias_1_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + 
value { + list { + s: "loc:@filter_type_all_qmmm/bias_1_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qmmm/MatMul_3" + op: "MatMul" + input: "filter_type_all_qmmm/Reshape_8" + input: "filter_type_all_qmmm/matrix_1_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all_qmmm/BiasAdd_3" + op: "BiasAdd" + input: "filter_type_all_qmmm/MatMul_3" + input: "filter_type_all_qmmm/bias_1_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all_qmmm/Tanh_3" + op: "Tanh" + input: "filter_type_all_qmmm/BiasAdd_3" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_9/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_9" + op: "Reshape" + input: "filter_type_all_qmmm/Tanh_3" + input: "filter_type_all_qmmm/Reshape_9/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/matrix_2_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + dim { + size: 4 + } + } + tensor_content: "\364\000\235\274\271\244\373\276C\345\363>\317\257\364>\336\315\005\276\263&\237\275\305\332\024\277p7\310>" + } + } + } +} +node { + name: "filter_type_all_qmmm/matrix_2_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qmmm/matrix_2_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qmmm/matrix_2_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qmmm/bias_2_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + } + tensor_content: "J\223\376\276n\333\375>\306A\010?\347\234\223\276" + } + } + } +} +node { + name: "filter_type_all_qmmm/bias_2_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qmmm/bias_2_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qmmm/bias_2_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qmmm/MatMul_4" + op: "MatMul" + input: "filter_type_all_qmmm/Reshape_9" + input: "filter_type_all_qmmm/matrix_2_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all_qmmm/BiasAdd_4" + op: "BiasAdd" + input: "filter_type_all_qmmm/MatMul_4" + input: "filter_type_all_qmmm/bias_2_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all_qmmm/Tanh_4" + op: "Tanh" + input: "filter_type_all_qmmm/BiasAdd_4" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} 
+node { + name: "filter_type_all_qmmm/Reshape_10/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_10" + op: "Reshape" + input: "filter_type_all_qmmm/Tanh_4" + input: "filter_type_all_qmmm/Reshape_10/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/concat_4/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all_qmmm/concat_4" + op: "ConcatV2" + input: "filter_type_all_qmmm/Reshape_9" + input: "filter_type_all_qmmm/Reshape_9" + input: "filter_type_all_qmmm/concat_4/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/add_4" + op: "AddV2" + input: "filter_type_all_qmmm/concat_4" + input: "filter_type_all_qmmm/Reshape_10" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qmmm/matrix_3_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + dim { + size: 8 + } + } + tensor_content: "\037\356\361>k\024\224>\322\335\271>\220\016\005>\306\231\244>\014\352\257\276\340G\335\275\225\242\202\276rC\"\274\204a/\276\010P\260\275F<\023\277D\025\365=`\217\302>3\000+\276\251\307\005?C\235\030\276\376v\365=\330\226\331\276\261\003\230>\206\356H>\324\306\340\274\361L\224\276W2B?&\214K\276v\251A\276\350a\213>\006\202\177>o\222U\276$m\230\276\313\335\300>h\255\243=" + } + } + } +} +node { + name: "filter_type_all_qmmm/matrix_3_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qmmm/matrix_3_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qmmm/matrix_3_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qmmm/bias_3_two_side_ebd" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 8 + } + } + tensor_content: "g\264\263?\272E\330\277\244iX\276`8\223?\302\227\301?Q]\333>\000\341,\300m\030\204=" + } + } + } +} +node { + name: "filter_type_all_qmmm/bias_3_two_side_ebd/read" + op: "Identity" + input: "filter_type_all_qmmm/bias_3_two_side_ebd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qmmm/bias_3_two_side_ebd" + } + } + } +} +node { + name: "filter_type_all_qmmm/MatMul_5" + op: "MatMul" + input: "filter_type_all_qmmm/add_4" + input: "filter_type_all_qmmm/matrix_3_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "filter_type_all_qmmm/BiasAdd_5" + op: "BiasAdd" + input: "filter_type_all_qmmm/MatMul_5" + input: "filter_type_all_qmmm/bias_3_two_side_ebd/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + 
} + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "filter_type_all_qmmm/Tanh_5" + op: "Tanh" + input: "filter_type_all_qmmm/BiasAdd_5" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_11/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\010\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_11" + op: "Reshape" + input: "filter_type_all_qmmm/Tanh_5" + input: "filter_type_all_qmmm/Reshape_11/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/concat_5/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "filter_type_all_qmmm/concat_5" + op: "ConcatV2" + input: "filter_type_all_qmmm/add_4" + input: "filter_type_all_qmmm/add_4" + input: "filter_type_all_qmmm/concat_5/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/add_5" + op: "AddV2" + input: "filter_type_all_qmmm/concat_5" + input: "filter_type_all_qmmm/Reshape_11" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qmmm/embedding_lookup/axis" + op: "Const" + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qmmm/add_5" + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "filter_type_all_qmmm/embedding_lookup" + op: "GatherV2" + input: "filter_type_all_qmmm/add_5" + input: "filter_type_all_qmmm/Reshape_5" + input: "filter_type_all_qmmm/embedding_lookup/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@filter_type_all_qmmm/add_5" + } + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "filter_type_all_qmmm/embedding_lookup/Identity" + op: "Identity" + input: "filter_type_all_qmmm/embedding_lookup" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_12/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\001\000\000\000" + } + } + } +} +node { + name: "filter_type_all_qmmm/Reshape_12" + op: "Reshape" + input: "Cast_16" + input: "filter_type_all_qmmm/Reshape_12/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "filter_type_all_qmmm/mul_1" + op: "Mul" + input: "filter_type_all_qmmm/embedding_lookup/Identity" + input: "filter_type_all_qmmm/Reshape_12" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qmmm/mul_2" + op: "Mul" + input: "filter_type_all_qmmm/add_2" + input: 
"filter_type_all_qmmm/mul_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "filter_type_all_qmmm/add_6" + op: "AddV2" + input: "filter_type_all_qmmm/mul_2" + input: "filter_type_all_qmmm/add_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Reshape_77/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377\033\000\000\000\010\000\000\000" + } + } + } +} +node { + name: "Reshape_77" + op: "Reshape" + input: "filter_type_all_qmmm/add_6" + input: "Reshape_77/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_78/shape/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 27 + } + } + } +} +node { + name: "Reshape_78/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 4 + } + } + } +} +node { + name: "Reshape_78/shape" + op: "Pack" + input: "strided_slice_60" + input: "Reshape_78/shape/1" + input: "Reshape_78/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_78" + op: "Reshape" + input: "Slice_16" + input: "Reshape_78/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "MatMul_2" + op: "BatchMatMulV2" + input: "Reshape_78" + input: "Reshape_77" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } +} +node { + name: "truediv_1/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 27.0 + } + } + } +} +node { + name: "truediv_1" + op: "RealDiv" + input: "MatMul_2" + input: "truediv_1/y" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Slice_19/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_19/size" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\377\377\377\377\377\377\377\377\004\000\000\000" + } + } + } +} +node { + name: "Slice_19" + op: "Slice" + input: "truediv_1" + input: "Slice_19/begin" + input: "Slice_19/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "MatMul_3" + op: "BatchMatMulV2" + input: "truediv_1" + input: "Slice_19" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: true + } + } + attr { + key: "adj_y" + value { + b: false + } + } +} +node { + name: "Reshape_79/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + 
attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377 \000\000\000" + } + } + } +} +node { + name: "Reshape_79" + op: "Reshape" + input: "MatMul_3" + input: "Reshape_79/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_19" + op: "Cast" + input: "Reshape_79" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Shape_21" + op: "Shape" + input: "Reshape_65" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_61/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_61/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_61/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_61" + op: "StridedSlice" + input: "Shape_21" + input: "strided_slice_61/stack" + input: "strided_slice_61/stack_1" + input: "strided_slice_61/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_62/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_62/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_62/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_62" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_62/stack" + input: "strided_slice_62/stack_1" + input: "strided_slice_62/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: 
"shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_80/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 32 + } + } + } +} +node { + name: "Reshape_80/shape" + op: "Pack" + input: "strided_slice_61" + input: "strided_slice_62" + input: "Reshape_80/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_80" + op: "Reshape" + input: "Cast_19" + input: "Reshape_80/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "concat_13/concat" + op: "Identity" + input: "Reshape_80" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_descriptor_qmmm" + op: "Identity" + input: "concat_13/concat" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "fitting_attr_qmmm/t_bias_atom_e" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + dim { + size: 6 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "fitting_attr_qmmm/t_bias_atom_e/read" + op: "Identity" + input: "fitting_attr_qmmm/t_bias_atom_e" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@fitting_attr_qmmm/t_bias_atom_e" + } + } + } +} +node { + name: "strided_slice_65/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_65/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_65/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_65" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_65/stack" + input: "strided_slice_65/stack_1" + input: "strided_slice_65/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_82/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_82/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 32 + } + } + } +} 
+node { + name: "Reshape_82/shape" + op: "Pack" + input: "Reshape_82/shape/0" + input: "strided_slice_65" + input: "Reshape_82/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_82" + op: "Reshape" + input: "o_descriptor_qmmm" + input: "Reshape_82/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_66/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_66/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_66/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_66" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_66/stack" + input: "strided_slice_66/stack_1" + input: "strided_slice_66/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "zeros_2/packed/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 32 + } + } + } +} +node { + name: "zeros_2/packed" + op: "Pack" + input: "strided_slice_49" + input: "strided_slice_66" + input: "zeros_2/packed/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "zeros_2/Const" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 0.0 + } + } + } +} +node { + name: "zeros_2" + op: "Fill" + input: "zeros_2/packed" + input: "zeros_2/Const" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_67/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_67/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_67/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: 
"strided_slice_67" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_67/stack" + input: "strided_slice_67/stack_1" + input: "strided_slice_67/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_83/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_83/shape" + op: "Pack" + input: "Reshape_83/shape/0" + input: "strided_slice_67" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_83" + op: "Reshape" + input: "GatherV2_5" + input: "Reshape_83/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_68/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_68/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_68/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_68" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_68/stack" + input: "strided_slice_68/stack_1" + input: "strided_slice_68/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Slice_21/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_21/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_21/size" + op: "Pack" + input: "Slice_21/size/0" + input: "strided_slice_68" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_21" + op: "Slice" + input: "Reshape_83" + input: "Slice_21/begin" + input: "Slice_21/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { 
+ type: DT_INT32 + } + } +} +node { + name: "GreaterEqual_1/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "GreaterEqual_1" + op: "GreaterEqual" + input: "Slice_21" + input: "GreaterEqual_1/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_21" + op: "Cast" + input: "GreaterEqual_1" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_BOOL + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Reshape_84/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_84" + op: "Reshape" + input: "Slice_21" + input: "Reshape_84/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "clip_by_value_5/Minimum/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 5 + } + } + } +} +node { + name: "clip_by_value_5/Minimum" + op: "Minimum" + input: "Reshape_84" + input: "clip_by_value_5/Minimum/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "clip_by_value_5/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "clip_by_value_5" + op: "Maximum" + input: "clip_by_value_5/Minimum" + input: "clip_by_value_5/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "embedding_lookup_7/axis" + op: "Const" + attr { + key: "_class" + value { + list { + s: "loc:@t_typeebd" + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "embedding_lookup_7" + op: "GatherV2" + input: "t_typeebd" + input: "clip_by_value_5" + input: "embedding_lookup_7/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@t_typeebd" + } + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "embedding_lookup_7/Identity" + op: "Identity" + input: "embedding_lookup_7" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Cast_22" + op: "Cast" + input: "embedding_lookup_7/Identity" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Reshape_85/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377 \000\000\000" + } + } + } +} +node { + name: "Reshape_85" + op: "Reshape" + input: "Reshape_82" + input: "Reshape_85/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } 
+} +node { + name: "concat_15/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_15" + op: "ConcatV2" + input: "Reshape_85" + input: "Cast_22" + input: "concat_15/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_69/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_69/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_69/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_69" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_69/stack" + input: "strided_slice_69/stack_1" + input: "strided_slice_69/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_86/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_86/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 40 + } + } + } +} +node { + name: "Reshape_86/shape" + op: "Pack" + input: "Reshape_86/shape/0" + input: "strided_slice_69" + input: "Reshape_86/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_86" + op: "Reshape" + input: "concat_15" + input: "Reshape_86/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_70/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_70/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_70/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: 
"strided_slice_70" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_70/stack" + input: "strided_slice_70/stack_1" + input: "strided_slice_70/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Cast_23" + op: "Cast" + input: "Reshape_86" + attr { + key: "DstT" + value { + type: DT_FLOAT + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Slice_22/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_22/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_22/size/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_22/size" + op: "Pack" + input: "Slice_22/size/0" + input: "strided_slice_70" + input: "Slice_22/size/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_22" + op: "Slice" + input: "Cast_23" + input: "Slice_22/begin" + input: "Slice_22/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Reshape_87/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377(\000\000\000" + } + } + } +} +node { + name: "Reshape_87" + op: "Reshape" + input: "Slice_22" + input: "Reshape_87/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_0_qmmm/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 40 + } + dim { + size: 2 + } + } + tensor_content: "\2241\000\276\224\223j>\333(%\rr\263=c\366q\275\261\261j\276C^G>\343\347\177\273\304@\'\275\3331r>s\263\211\276Fa\212\275\336\335\262\275\347\253J=\254\3762>h\375\263\2764\223\230=\254jO\276\364\211a=\271\262\210\276\272\030\313\273\016\'\016>j$Z\276\265\024j>\303(\033\275\267\206\347\275\236\237\367\274\023I5<\306\007\310=1Bx\276\022\rR>Xf9\276\005\177\237;B\301\226=X\024\212=M;H\275nq\216\275h\257\216\2769\341\235\245 o>NN\016=\310\2134\275\242\347)=\333\364\211>N\372\021>\373\254\242\276\315\262F\275\321=?>&\221\333=-](\276?\357\t;\247\264(\276>\256\350\274g@\256\276s\200\220>\253nD>7\266T\275\034K\312\273\370DT\275\340PB\275&\\\362\275\310u5\275\221B:\275\357\360\377\275" + } + } + } +} +node { + name: "layer_0_qmmm/matrix/read" + op: 
"Identity" + input: "layer_0_qmmm/matrix" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_0_qmmm/matrix" + } + } + } +} +node { + name: "layer_0_qmmm/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\'\255[\277\211WI\276" + } + } + } +} +node { + name: "layer_0_qmmm/bias/read" + op: "Identity" + input: "layer_0_qmmm/bias" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_0_qmmm/bias" + } + } + } +} +node { + name: "layer_0_qmmm/MatMul" + op: "MatMul" + input: "Reshape_87" + input: "layer_0_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_0_qmmm/BiasAdd" + op: "BiasAdd" + input: "layer_0_qmmm/MatMul" + input: "layer_0_qmmm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_0_qmmm/Tanh" + op: "Tanh" + input: "layer_0_qmmm/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_0_qmmm/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "layer_0_qmmm/Reshape" + op: "Reshape" + input: "layer_0_qmmm/Tanh" + input: "layer_0_qmmm/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_1_qmmm/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + dim { + size: 2 + } + } + tensor_content: "\035\264\027>*\253\200\274;\030\340\274\313#\232<" + } + } + } +} +node { + name: "layer_1_qmmm/matrix/read" + op: "Identity" + input: "layer_1_qmmm/matrix" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_1_qmmm/matrix" + } + } + } +} +node { + name: "layer_1_qmmm/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "k\313\226\277\270\2765\277" + } + } + } +} +node { + name: "layer_1_qmmm/bias/read" + op: "Identity" + input: "layer_1_qmmm/bias" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_1_qmmm/bias" + } + } + } +} +node { + name: "layer_1_qmmm/MatMul" + op: "MatMul" + input: "layer_0_qmmm/Reshape" + input: "layer_1_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_1_qmmm/BiasAdd" + op: "BiasAdd" + input: "layer_1_qmmm/MatMul" + input: "layer_1_qmmm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_1_qmmm/idt" + op: "Const" + attr { + 
key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\207%\316=|,\316=" + } + } + } +} +node { + name: "layer_1_qmmm/idt/read" + op: "Identity" + input: "layer_1_qmmm/idt" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_1_qmmm/idt" + } + } + } +} +node { + name: "layer_1_qmmm/Tanh" + op: "Tanh" + input: "layer_1_qmmm/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_1_qmmm/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "layer_1_qmmm/Reshape" + op: "Reshape" + input: "layer_1_qmmm/Tanh" + input: "layer_1_qmmm/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_1_qmmm/mul" + op: "Mul" + input: "layer_1_qmmm/Reshape" + input: "layer_1_qmmm/idt/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "add_10" + op: "AddV2" + input: "layer_0_qmmm/Reshape" + input: "layer_1_qmmm/mul" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_2_qmmm/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + dim { + size: 2 + } + } + tensor_content: "\035J\300\274y\031\032\277\333Z\025?\344\326\025?" + } + } + } +} +node { + name: "layer_2_qmmm/matrix/read" + op: "Identity" + input: "layer_2_qmmm/matrix" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_2_qmmm/matrix" + } + } + } +} +node { + name: "layer_2_qmmm/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "J\223\376\276n\333\375>" + } + } + } +} +node { + name: "layer_2_qmmm/bias/read" + op: "Identity" + input: "layer_2_qmmm/bias" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_2_qmmm/bias" + } + } + } +} +node { + name: "layer_2_qmmm/MatMul" + op: "MatMul" + input: "add_10" + input: "layer_2_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_2_qmmm/BiasAdd" + op: "BiasAdd" + input: "layer_2_qmmm/MatMul" + input: "layer_2_qmmm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_2_qmmm/idt" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "d*\317=X\313\313=" + } + } + } +} +node { + name: "layer_2_qmmm/idt/read" + op: "Identity" + input: "layer_2_qmmm/idt" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@layer_2_qmmm/idt" + } + } + } +} +node { + name: "layer_2_qmmm/Tanh" + 
op: "Tanh" + input: "layer_2_qmmm/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_2_qmmm/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "layer_2_qmmm/Reshape" + op: "Reshape" + input: "layer_2_qmmm/Tanh" + input: "layer_2_qmmm/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_2_qmmm/mul" + op: "Mul" + input: "layer_2_qmmm/Reshape" + input: "layer_2_qmmm/idt/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "add_11" + op: "AddV2" + input: "add_10" + input: "layer_2_qmmm/mul" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "final_layer_qmmm/matrix" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + dim { + size: 1 + } + } + tensor_content: "\037\356q?k\024\024?" + } + } + } +} +node { + name: "final_layer_qmmm/matrix/read" + op: "Identity" + input: "final_layer_qmmm/matrix" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@final_layer_qmmm/matrix" + } + } + } +} +node { + name: "final_layer_qmmm/bias" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 1 + } + } + float_val: 1.4039429426193237 + } + } + } +} +node { + name: "final_layer_qmmm/bias/read" + op: "Identity" + input: "final_layer_qmmm/bias" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@final_layer_qmmm/bias" + } + } + } +} +node { + name: "final_layer_qmmm/MatMul" + op: "MatMul" + input: "add_11" + input: "final_layer_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "final_layer_qmmm/BiasAdd" + op: "BiasAdd" + input: "final_layer_qmmm/MatMul" + input: "final_layer_qmmm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "Cast_24" + op: "Cast" + input: "final_layer_qmmm/BiasAdd" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Reshape_88/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377 \000\000\000" + } + } + } +} +node { + name: "Reshape_88" + op: "Reshape" + input: "zeros_2" + input: "Reshape_88/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "concat_16/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_16" + op: "ConcatV2" + 
input: "Reshape_88" + input: "Cast_22" + input: "concat_16/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_71/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_71/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_71/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_71" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_71/stack" + input: "strided_slice_71/stack_1" + input: "strided_slice_71/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_89/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_89/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 40 + } + } + } +} +node { + name: "Reshape_89/shape" + op: "Pack" + input: "Reshape_89/shape/0" + input: "strided_slice_71" + input: "Reshape_89/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_89" + op: "Reshape" + input: "concat_16" + input: "Reshape_89/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_72/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_72/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_72/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_72" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_72/stack" + input: "strided_slice_72/stack_1" + input: "strided_slice_72/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 
+ } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Cast_25" + op: "Cast" + input: "Reshape_89" + attr { + key: "DstT" + value { + type: DT_FLOAT + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "Slice_23/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + tensor_content: "\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_23/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_23/size/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_23/size" + op: "Pack" + input: "Slice_23/size/0" + input: "strided_slice_72" + input: "Slice_23/size/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_23" + op: "Slice" + input: "Cast_25" + input: "Slice_23/begin" + input: "Slice_23/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Reshape_90/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377(\000\000\000" + } + } + } +} +node { + name: "Reshape_90" + op: "Reshape" + input: "Slice_23" + input: "Reshape_90/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_0_qmmm_1/MatMul" + op: "MatMul" + input: "Reshape_90" + input: "layer_0_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_0_qmmm_1/BiasAdd" + op: "BiasAdd" + input: "layer_0_qmmm_1/MatMul" + input: "layer_0_qmmm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_0_qmmm_1/Tanh" + op: "Tanh" + input: "layer_0_qmmm_1/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_0_qmmm_1/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "layer_0_qmmm_1/Reshape" + op: "Reshape" + input: "layer_0_qmmm_1/Tanh" + input: "layer_0_qmmm_1/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_1_qmmm_1/MatMul" + op: "MatMul" + input: 
"layer_0_qmmm_1/Reshape" + input: "layer_1_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_1_qmmm_1/BiasAdd" + op: "BiasAdd" + input: "layer_1_qmmm_1/MatMul" + input: "layer_1_qmmm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_1_qmmm_1/Tanh" + op: "Tanh" + input: "layer_1_qmmm_1/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_1_qmmm_1/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "layer_1_qmmm_1/Reshape" + op: "Reshape" + input: "layer_1_qmmm_1/Tanh" + input: "layer_1_qmmm_1/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_1_qmmm_1/mul" + op: "Mul" + input: "layer_1_qmmm_1/Reshape" + input: "layer_1_qmmm/idt/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "add_12" + op: "AddV2" + input: "layer_0_qmmm_1/Reshape" + input: "layer_1_qmmm_1/mul" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_2_qmmm_1/MatMul" + op: "MatMul" + input: "add_12" + input: "layer_2_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "layer_2_qmmm_1/BiasAdd" + op: "BiasAdd" + input: "layer_2_qmmm_1/MatMul" + input: "layer_2_qmmm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "layer_2_qmmm_1/Tanh" + op: "Tanh" + input: "layer_2_qmmm_1/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "layer_2_qmmm_1/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\002\000\000\000" + } + } + } +} +node { + name: "layer_2_qmmm_1/Reshape" + op: "Reshape" + input: "layer_2_qmmm_1/Tanh" + input: "layer_2_qmmm_1/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "layer_2_qmmm_1/mul" + op: "Mul" + input: "layer_2_qmmm_1/Reshape" + input: "layer_2_qmmm/idt/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "add_13" + op: "AddV2" + input: "add_12" + input: "layer_2_qmmm_1/mul" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "final_layer_qmmm_1/MatMul" + op: "MatMul" + input: "add_13" + input: "final_layer_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "final_layer_qmmm_1/BiasAdd" + op: "BiasAdd" + input: "final_layer_qmmm_1/MatMul" + input: "final_layer_qmmm/bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: 
"NHWC" + } + } +} +node { + name: "Cast_26" + op: "Cast" + input: "final_layer_qmmm_1/BiasAdd" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "sub_5" + op: "Sub" + input: "Cast_24" + input: "Cast_26" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Shape_23" + op: "Shape" + input: "Reshape_86" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_73/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_73/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_73/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_73" + op: "StridedSlice" + input: "Shape_23" + input: "strided_slice_73/stack" + input: "strided_slice_73/stack_1" + input: "strided_slice_73/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_74/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_74/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_74/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_74" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_74/stack" + input: "strided_slice_74/stack_1" + input: "strided_slice_74/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_91/shape" + op: "Pack" + input: "strided_slice_73" + input: "strided_slice_74" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { 
+ type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_91" + op: "Reshape" + input: "sub_5" + input: "Reshape_91/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "embedding_lookup_8/axis" + op: "Const" + attr { + key: "_class" + value { + list { + s: "loc:@fitting_attr_qmmm/t_bias_atom_e" + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "embedding_lookup_8" + op: "GatherV2" + input: "fitting_attr_qmmm/t_bias_atom_e/read" + input: "clip_by_value_5" + input: "embedding_lookup_8/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "_class" + value { + list { + s: "loc:@fitting_attr_qmmm/t_bias_atom_e" + } + } + } + attr { + key: "batch_dims" + value { + i: 0 + } + } +} +node { + name: "embedding_lookup_8/Identity" + op: "Identity" + input: "embedding_lookup_8" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Shape_24" + op: "Shape" + input: "Reshape_86" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_75/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_75/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_75/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_75" + op: "StridedSlice" + input: "Shape_24" + input: "strided_slice_75/stack" + input: "strided_slice_75/stack_1" + input: "strided_slice_75/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "strided_slice_76/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_76/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 8 + } + } + } +} +node { + name: "strided_slice_76/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + 
tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_76" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_76/stack" + input: "strided_slice_76/stack_1" + input: "strided_slice_76/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "Rank_1" + op: "Rank" + input: "strided_slice_76" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "range_1/start" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "range_1/delta" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "range_1" + op: "Range" + input: "range_1/start" + input: "Rank_1" + input: "range_1/delta" + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Sum_1" + op: "Sum" + input: "strided_slice_76" + input: "range_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "Reshape_92/shape" + op: "Pack" + input: "strided_slice_75" + input: "Sum_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_92" + op: "Reshape" + input: "embedding_lookup_8/Identity" + input: "Reshape_92/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "add_14" + op: "AddV2" + input: "Reshape_91" + input: "Reshape_92" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "mul_24" + op: "Mul" + input: "add_14" + input: "Cast_21" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "Reshape_93/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_93" + op: "Reshape" + input: "mul_24" + input: "Reshape_93/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_77/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_77/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_77/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + 
tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_77" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_77/stack" + input: "strided_slice_77/stack_1" + input: "strided_slice_77/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "o_atom_energy_qmmm/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "o_atom_energy_qmmm/shape" + op: "Pack" + input: "o_atom_energy_qmmm/shape/0" + input: "strided_slice_77" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_atom_energy_qmmm" + op: "Reshape" + input: "Reshape_93" + input: "o_atom_energy_qmmm/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "o_energy_qmmm/reduction_indices" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "o_energy_qmmm" + op: "Sum" + input: "o_atom_energy_qmmm" + input: "o_energy_qmmm/reduction_indices" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/Shape" + op: "Shape" + input: "Reshape_93" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/grad_ys_0/Const" + op: "Const" + attr { + key: "dtype" + value { + type: DT_DOUBLE + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_DOUBLE + tensor_shape { + } + double_val: 1.0 + } + } + } +} +node { + name: "gradients_1/grad_ys_0" + op: "Fill" + input: "gradients_1/Shape" + input: "gradients_1/grad_ys_0/Const" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Reshape_93_grad/Shape" + op: "Shape" + input: "mul_24" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_93_grad/Reshape" + op: "Reshape" + input: "gradients_1/grad_ys_0" + input: "gradients_1/Reshape_93_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/mul_24_grad/Shape" + op: "Shape" + input: "add_14" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT 
+ args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/mul_24_grad/Shape_1" + op: "Shape" + input: "Cast_21" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/mul_24_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/mul_24_grad/Shape" + input: "gradients_1/mul_24_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/mul_24_grad/Mul" + op: "Mul" + input: "gradients_1/Reshape_93_grad/Reshape" + input: "Cast_21" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients_1/mul_24_grad/Sum" + op: "Sum" + input: "gradients_1/mul_24_grad/Mul" + input: "gradients_1/mul_24_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/mul_24_grad/Reshape" + op: "Reshape" + input: "gradients_1/mul_24_grad/Sum" + input: "gradients_1/mul_24_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/add_14_grad/Shape" + op: "Shape" + input: "Reshape_91" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/add_14_grad/Shape_1" + op: "Shape" + input: "Reshape_92" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/add_14_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/add_14_grad/Shape" + input: "gradients_1/add_14_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/add_14_grad/Sum" + op: "Sum" + input: "gradients_1/mul_24_grad/Reshape" + input: "gradients_1/add_14_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/add_14_grad/Reshape" + op: "Reshape" + input: "gradients_1/add_14_grad/Sum" + input: "gradients_1/add_14_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Reshape_91_grad/Shape" + op: "Shape" + input: "sub_5" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_91_grad/Reshape" + op: "Reshape" + input: "gradients_1/add_14_grad/Reshape" + input: "gradients_1/Reshape_91_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: 
"gradients_1/sub_5_grad/Shape" + op: "Shape" + input: "Cast_24" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/sub_5_grad/Shape_1" + op: "Shape" + input: "Cast_26" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/sub_5_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/sub_5_grad/Shape" + input: "gradients_1/sub_5_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/sub_5_grad/Sum" + op: "Sum" + input: "gradients_1/Reshape_91_grad/Reshape" + input: "gradients_1/sub_5_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/sub_5_grad/Reshape" + op: "Reshape" + input: "gradients_1/sub_5_grad/Sum" + input: "gradients_1/sub_5_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Cast_24_grad/Cast" + op: "Cast" + input: "gradients_1/sub_5_grad/Reshape" + attr { + key: "DstT" + value { + type: DT_FLOAT + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "gradients_1/final_layer_qmmm/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients_1/Cast_24_grad/Cast" + input: "final_layer_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients_1/add_11_grad/Shape" + op: "Shape" + input: "add_10" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/add_11_grad/Shape_1" + op: "Shape" + input: "layer_2_qmmm/mul" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/add_11_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/add_11_grad/Shape" + input: "gradients_1/add_11_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/add_11_grad/Sum" + op: "Sum" + input: "gradients_1/final_layer_qmmm/MatMul_grad/MatMul" + input: "gradients_1/add_11_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/add_11_grad/Reshape" + op: "Reshape" + input: "gradients_1/add_11_grad/Sum" + input: "gradients_1/add_11_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + 
value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/add_11_grad/Sum_1" + op: "Sum" + input: "gradients_1/final_layer_qmmm/MatMul_grad/MatMul" + input: "gradients_1/add_11_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/add_11_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/add_11_grad/Sum_1" + input: "gradients_1/add_11_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/layer_2_qmmm/mul_grad/Shape" + op: "Shape" + input: "layer_2_qmmm/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/layer_2_qmmm/mul_grad/Shape_1" + op: "Shape" + input: "layer_2_qmmm/idt/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/layer_2_qmmm/mul_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/layer_2_qmmm/mul_grad/Shape" + input: "gradients_1/layer_2_qmmm/mul_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/layer_2_qmmm/mul_grad/Mul" + op: "Mul" + input: "gradients_1/add_11_grad/Reshape_1" + input: "layer_2_qmmm/idt/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/layer_2_qmmm/mul_grad/Sum" + op: "Sum" + input: "gradients_1/layer_2_qmmm/mul_grad/Mul" + input: "gradients_1/layer_2_qmmm/mul_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/layer_2_qmmm/mul_grad/Reshape" + op: "Reshape" + input: "gradients_1/layer_2_qmmm/mul_grad/Sum" + input: "gradients_1/layer_2_qmmm/mul_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/layer_2_qmmm/Reshape_grad/Shape" + op: "Shape" + input: "layer_2_qmmm/Tanh" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/layer_2_qmmm/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients_1/layer_2_qmmm/mul_grad/Reshape" + input: "gradients_1/layer_2_qmmm/Reshape_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients_1/layer_2_qmmm/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "layer_2_qmmm/Tanh" + input: "gradients_1/layer_2_qmmm/Reshape_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/layer_2_qmmm/MatMul_grad/MatMul" + op: "MatMul" + input: 
"gradients_1/layer_2_qmmm/Tanh_grad/TanhGrad" + input: "layer_2_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients_1/AddN" + op: "AddN" + input: "gradients_1/add_11_grad/Reshape" + input: "gradients_1/layer_2_qmmm/MatMul_grad/MatMul" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients_1/add_11_grad/Reshape" + } + } + } +} +node { + name: "gradients_1/add_10_grad/Shape" + op: "Shape" + input: "layer_0_qmmm/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/add_10_grad/Shape_1" + op: "Shape" + input: "layer_1_qmmm/mul" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/add_10_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/add_10_grad/Shape" + input: "gradients_1/add_10_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/add_10_grad/Sum" + op: "Sum" + input: "gradients_1/AddN" + input: "gradients_1/add_10_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/add_10_grad/Reshape" + op: "Reshape" + input: "gradients_1/add_10_grad/Sum" + input: "gradients_1/add_10_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/add_10_grad/Sum_1" + op: "Sum" + input: "gradients_1/AddN" + input: "gradients_1/add_10_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/add_10_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/add_10_grad/Sum_1" + input: "gradients_1/add_10_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/layer_1_qmmm/mul_grad/Shape" + op: "Shape" + input: "layer_1_qmmm/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/layer_1_qmmm/mul_grad/Shape_1" + op: "Shape" + input: "layer_1_qmmm/idt/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/layer_1_qmmm/mul_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/layer_1_qmmm/mul_grad/Shape" + input: 
"gradients_1/layer_1_qmmm/mul_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/layer_1_qmmm/mul_grad/Mul" + op: "Mul" + input: "gradients_1/add_10_grad/Reshape_1" + input: "layer_1_qmmm/idt/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/layer_1_qmmm/mul_grad/Sum" + op: "Sum" + input: "gradients_1/layer_1_qmmm/mul_grad/Mul" + input: "gradients_1/layer_1_qmmm/mul_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/layer_1_qmmm/mul_grad/Reshape" + op: "Reshape" + input: "gradients_1/layer_1_qmmm/mul_grad/Sum" + input: "gradients_1/layer_1_qmmm/mul_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/layer_1_qmmm/Reshape_grad/Shape" + op: "Shape" + input: "layer_1_qmmm/Tanh" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/layer_1_qmmm/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients_1/layer_1_qmmm/mul_grad/Reshape" + input: "gradients_1/layer_1_qmmm/Reshape_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients_1/layer_1_qmmm/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "layer_1_qmmm/Tanh" + input: "gradients_1/layer_1_qmmm/Reshape_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/layer_1_qmmm/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients_1/layer_1_qmmm/Tanh_grad/TanhGrad" + input: "layer_1_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients_1/AddN_1" + op: "AddN" + input: "gradients_1/add_10_grad/Reshape" + input: "gradients_1/layer_1_qmmm/MatMul_grad/MatMul" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients_1/add_10_grad/Reshape" + } + } + } +} +node { + name: "gradients_1/layer_0_qmmm/Reshape_grad/Shape" + op: "Shape" + input: "layer_0_qmmm/Tanh" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/layer_0_qmmm/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients_1/AddN_1" + input: "gradients_1/layer_0_qmmm/Reshape_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients_1/layer_0_qmmm/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "layer_0_qmmm/Tanh" + input: "gradients_1/layer_0_qmmm/Reshape_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + 
name: "gradients_1/layer_0_qmmm/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients_1/layer_0_qmmm/Tanh_grad/TanhGrad" + input: "layer_0_qmmm/matrix/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients_1/Reshape_87_grad/Shape" + op: "Shape" + input: "Slice_22" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_87_grad/Reshape" + op: "Reshape" + input: "gradients_1/layer_0_qmmm/MatMul_grad/MatMul" + input: "gradients_1/Reshape_87_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_22_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "gradients_1/Slice_22_grad/Shape" + op: "Shape" + input: "Slice_22" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Slice_22_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/Slice_22_grad/stack" + op: "Pack" + input: "gradients_1/Slice_22_grad/Rank" + input: "gradients_1/Slice_22_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients_1/Slice_22_grad/Reshape" + op: "Reshape" + input: "Slice_22/begin" + input: "gradients_1/Slice_22_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_22_grad/Shape_1" + op: "Shape" + input: "Cast_23" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Slice_22_grad/sub" + op: "Sub" + input: "gradients_1/Slice_22_grad/Shape_1" + input: "gradients_1/Slice_22_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_22_grad/sub_1" + op: "Sub" + input: "gradients_1/Slice_22_grad/sub" + input: "Slice_22/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_22_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/Slice_22_grad/sub_1" + input: "gradients_1/Slice_22_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_22_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + 
name: "gradients_1/Slice_22_grad/concat" + op: "ConcatV2" + input: "gradients_1/Slice_22_grad/Reshape" + input: "gradients_1/Slice_22_grad/Reshape_1" + input: "gradients_1/Slice_22_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_22_grad/Pad" + op: "Pad" + input: "gradients_1/Reshape_87_grad/Reshape" + input: "gradients_1/Slice_22_grad/concat" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Cast_23_grad/Cast" + op: "Cast" + input: "gradients_1/Slice_22_grad/Pad" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "gradients_1/Reshape_86_grad/Shape" + op: "Shape" + input: "concat_15" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_86_grad/Reshape" + op: "Reshape" + input: "gradients_1/Cast_23_grad/Cast" + input: "gradients_1/Reshape_86_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/concat_15_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients_1/concat_15_grad/mod" + op: "FloorMod" + input: "concat_15/axis" + input: "gradients_1/concat_15_grad/Rank" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/concat_15_grad/ShapeN" + op: "ShapeN" + input: "Reshape_85" + input: "Cast_22" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/concat_15_grad/ConcatOffset" + op: "ConcatOffset" + input: "gradients_1/concat_15_grad/mod" + input: "gradients_1/concat_15_grad/ShapeN" + input: "gradients_1/concat_15_grad/ShapeN:1" + attr { + key: "N" + value { + i: 2 + } + } +} +node { + name: "gradients_1/concat_15_grad/Slice" + op: "Slice" + input: "gradients_1/Reshape_86_grad/Reshape" + input: "gradients_1/concat_15_grad/ConcatOffset" + input: "gradients_1/concat_15_grad/ShapeN" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients_1/Reshape_85_grad/Shape" + op: "Shape" + input: "Reshape_82" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_85_grad/Reshape" + op: "Reshape" + input: "gradients_1/concat_15_grad/Slice" + input: "gradients_1/Reshape_85_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Reshape_82_grad/Shape" + op: "Shape" + input: "o_descriptor_qmmm" + attr { + key: "T" + value { + type: 
DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_82_grad/Reshape" + op: "Reshape" + input: "gradients_1/Reshape_85_grad/Reshape" + input: "gradients_1/Reshape_82_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Reshape_80_grad/Shape" + op: "Shape" + input: "Cast_19" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_80_grad/Reshape" + op: "Reshape" + input: "gradients_1/Reshape_82_grad/Reshape" + input: "gradients_1/Reshape_80_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Cast_19_grad/Cast" + op: "Cast" + input: "gradients_1/Reshape_80_grad/Reshape" + attr { + key: "DstT" + value { + type: DT_FLOAT + } + } + attr { + key: "SrcT" + value { + type: DT_DOUBLE + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "gradients_1/Reshape_79_grad/Shape" + op: "Shape" + input: "MatMul_3" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_79_grad/Reshape" + op: "Reshape" + input: "gradients_1/Cast_19_grad/Cast" + input: "gradients_1/Reshape_79_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/MatMul_3_grad/MatMul" + op: "BatchMatMulV2" + input: "Slice_19" + input: "gradients_1/Reshape_79_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: true + } + } +} +node { + name: "gradients_1/MatMul_3_grad/MatMul_1" + op: "BatchMatMulV2" + input: "truediv_1" + input: "gradients_1/Reshape_79_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: false + } + } +} +node { + name: "gradients_1/MatMul_3_grad/Shape" + op: "Shape" + input: "truediv_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/MatMul_3_grad/Shape_1" + op: "Shape" + input: "Slice_19" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/MatMul_3_grad/strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: 
"gradients_1/MatMul_3_grad/strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients_1/MatMul_3_grad/strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/MatMul_3_grad/strided_slice" + op: "StridedSlice" + input: "gradients_1/MatMul_3_grad/Shape" + input: "gradients_1/MatMul_3_grad/strided_slice/stack" + input: "gradients_1/MatMul_3_grad/strided_slice/stack_1" + input: "gradients_1/MatMul_3_grad/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients_1/MatMul_3_grad/strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients_1/MatMul_3_grad/strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients_1/MatMul_3_grad/strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/MatMul_3_grad/strided_slice_1" + op: "StridedSlice" + input: "gradients_1/MatMul_3_grad/Shape_1" + input: "gradients_1/MatMul_3_grad/strided_slice_1/stack" + input: "gradients_1/MatMul_3_grad/strided_slice_1/stack_1" + input: "gradients_1/MatMul_3_grad/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients_1/MatMul_3_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/MatMul_3_grad/strided_slice" + input: "gradients_1/MatMul_3_grad/strided_slice_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/MatMul_3_grad/Sum" + op: "Sum" + input: "gradients_1/MatMul_3_grad/MatMul" + input: "gradients_1/MatMul_3_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/MatMul_3_grad/Reshape" + op: "Reshape" + input: "gradients_1/MatMul_3_grad/Sum" + input: "gradients_1/MatMul_3_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } 
+ attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/MatMul_3_grad/Sum_1" + op: "Sum" + input: "gradients_1/MatMul_3_grad/MatMul_1" + input: "gradients_1/MatMul_3_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/MatMul_3_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/MatMul_3_grad/Sum_1" + input: "gradients_1/MatMul_3_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_19_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "gradients_1/Slice_19_grad/Shape" + op: "Shape" + input: "Slice_19" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Slice_19_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/Slice_19_grad/stack" + op: "Pack" + input: "gradients_1/Slice_19_grad/Rank" + input: "gradients_1/Slice_19_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients_1/Slice_19_grad/Reshape" + op: "Reshape" + input: "Slice_19/begin" + input: "gradients_1/Slice_19_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_19_grad/Shape_1" + op: "Shape" + input: "truediv_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Slice_19_grad/sub" + op: "Sub" + input: "gradients_1/Slice_19_grad/Shape_1" + input: "gradients_1/Slice_19_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_19_grad/sub_1" + op: "Sub" + input: "gradients_1/Slice_19_grad/sub" + input: "Slice_19/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_19_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/Slice_19_grad/sub_1" + input: "gradients_1/Slice_19_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_19_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/Slice_19_grad/concat" + op: "ConcatV2" + input: "gradients_1/Slice_19_grad/Reshape" + input: "gradients_1/Slice_19_grad/Reshape_1" + input: "gradients_1/Slice_19_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + 
value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_19_grad/Pad" + op: "Pad" + input: "gradients_1/MatMul_3_grad/Reshape_1" + input: "gradients_1/Slice_19_grad/concat" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/AddN_2" + op: "AddN" + input: "gradients_1/MatMul_3_grad/Reshape" + input: "gradients_1/Slice_19_grad/Pad" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients_1/MatMul_3_grad/Reshape" + } + } + } +} +node { + name: "gradients_1/truediv_1_grad/Shape" + op: "Shape" + input: "MatMul_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/truediv_1_grad/Shape_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + } + } + } + } + } +} +node { + name: "gradients_1/truediv_1_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/truediv_1_grad/Shape" + input: "gradients_1/truediv_1_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/truediv_1_grad/RealDiv" + op: "RealDiv" + input: "gradients_1/AddN_2" + input: "truediv_1/y" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/truediv_1_grad/Sum" + op: "Sum" + input: "gradients_1/truediv_1_grad/RealDiv" + input: "gradients_1/truediv_1_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/truediv_1_grad/Reshape" + op: "Reshape" + input: "gradients_1/truediv_1_grad/Sum" + input: "gradients_1/truediv_1_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/MatMul_2_grad/MatMul" + op: "BatchMatMulV2" + input: "Reshape_77" + input: "gradients_1/truediv_1_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: true + } + } +} +node { + name: "gradients_1/MatMul_2_grad/MatMul_1" + op: "BatchMatMulV2" + input: "Reshape_78" + input: "gradients_1/truediv_1_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "adj_x" + value { + b: false + } + } + attr { + key: "adj_y" + value { + b: false + } + } +} +node { + name: "gradients_1/MatMul_2_grad/Shape" + op: "Shape" + input: "Reshape_78" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/MatMul_2_grad/Shape_1" + op: "Shape" + input: "Reshape_77" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + 
type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/MatMul_2_grad/strided_slice/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients_1/MatMul_2_grad/strided_slice/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients_1/MatMul_2_grad/strided_slice/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/MatMul_2_grad/strided_slice" + op: "StridedSlice" + input: "gradients_1/MatMul_2_grad/Shape" + input: "gradients_1/MatMul_2_grad/strided_slice/stack" + input: "gradients_1/MatMul_2_grad/strided_slice/stack_1" + input: "gradients_1/MatMul_2_grad/strided_slice/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients_1/MatMul_2_grad/strided_slice_1/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "gradients_1/MatMul_2_grad/strided_slice_1/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: -2 + } + } + } +} +node { + name: "gradients_1/MatMul_2_grad/strided_slice_1/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/MatMul_2_grad/strided_slice_1" + op: "StridedSlice" + input: "gradients_1/MatMul_2_grad/Shape_1" + input: "gradients_1/MatMul_2_grad/strided_slice_1/stack" + input: "gradients_1/MatMul_2_grad/strided_slice_1/stack_1" + input: "gradients_1/MatMul_2_grad/strided_slice_1/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 1 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "gradients_1/MatMul_2_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/MatMul_2_grad/strided_slice" + input: "gradients_1/MatMul_2_grad/strided_slice_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/MatMul_2_grad/Sum" + op: "Sum" + input: "gradients_1/MatMul_2_grad/MatMul" + input: "gradients_1/MatMul_2_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + 
attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/MatMul_2_grad/Reshape" + op: "Reshape" + input: "gradients_1/MatMul_2_grad/Sum" + input: "gradients_1/MatMul_2_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/MatMul_2_grad/Sum_1" + op: "Sum" + input: "gradients_1/MatMul_2_grad/MatMul_1" + input: "gradients_1/MatMul_2_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/MatMul_2_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/MatMul_2_grad/Sum_1" + input: "gradients_1/MatMul_2_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Reshape_78_grad/Shape" + op: "Shape" + input: "Slice_16" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_78_grad/Reshape" + op: "Reshape" + input: "gradients_1/MatMul_2_grad/Reshape" + input: "gradients_1/Reshape_78_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Reshape_77_grad/Shape" + op: "Shape" + input: "filter_type_all_qmmm/add_6" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_77_grad/Reshape" + op: "Reshape" + input: "gradients_1/MatMul_2_grad/Reshape_1" + input: "gradients_1/Reshape_77_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_6_grad/Shape" + op: "Shape" + input: "filter_type_all_qmmm/mul_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_6_grad/Shape_1" + op: "Shape" + input: "filter_type_all_qmmm/add_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_6_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/filter_type_all_qmmm/add_6_grad/Shape" + input: "gradients_1/filter_type_all_qmmm/add_6_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_6_grad/Sum" + op: "Sum" + input: "gradients_1/Reshape_77_grad/Reshape" + input: "gradients_1/filter_type_all_qmmm/add_6_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value 
{ + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_6_grad/Reshape" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_6_grad/Sum" + input: "gradients_1/filter_type_all_qmmm/add_6_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_6_grad/Sum_1" + op: "Sum" + input: "gradients_1/Reshape_77_grad/Reshape" + input: "gradients_1/filter_type_all_qmmm/add_6_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_6_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_6_grad/Sum_1" + input: "gradients_1/filter_type_all_qmmm/add_6_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/mul_2_grad/Shape" + op: "Shape" + input: "filter_type_all_qmmm/add_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/mul_2_grad/Shape_1" + op: "Shape" + input: "filter_type_all_qmmm/mul_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/mul_2_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/filter_type_all_qmmm/mul_2_grad/Shape" + input: "gradients_1/filter_type_all_qmmm/mul_2_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/mul_2_grad/Mul" + op: "Mul" + input: "gradients_1/filter_type_all_qmmm/add_6_grad/Reshape" + input: "filter_type_all_qmmm/mul_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/mul_2_grad/Sum" + op: "Sum" + input: "gradients_1/filter_type_all_qmmm/mul_2_grad/Mul" + input: "gradients_1/filter_type_all_qmmm/mul_2_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/mul_2_grad/Reshape" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/mul_2_grad/Sum" + input: "gradients_1/filter_type_all_qmmm/mul_2_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/AddN_3" + op: "AddN" + input: "gradients_1/filter_type_all_qmmm/add_6_grad/Reshape_1" + input: "gradients_1/filter_type_all_qmmm/mul_2_grad/Reshape" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients_1/filter_type_all_qmmm/add_6_grad/Reshape_1" + } + } + } +} +node { + name: 
"gradients_1/filter_type_all_qmmm/add_2_grad/Shape" + op: "Shape" + input: "filter_type_all_qmmm/concat_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_2_grad/Shape_1" + op: "Shape" + input: "filter_type_all_qmmm/Reshape_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_2_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/Shape" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_2_grad/Sum" + op: "Sum" + input: "gradients_1/AddN_3" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_2_grad/Reshape" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/Sum" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_2_grad/Sum_1" + op: "Sum" + input: "gradients_1/AddN_3" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_2_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/Sum_1" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_2_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_2_grad/mod" + op: "FloorMod" + input: "filter_type_all_qmmm/concat_2/axis" + input: "gradients_1/filter_type_all_qmmm/concat_2_grad/Rank" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_2_grad/ShapeN" + op: "ShapeN" + input: "filter_type_all_qmmm/add_1" + input: "filter_type_all_qmmm/add_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_2_grad/ConcatOffset" + op: "ConcatOffset" + input: "gradients_1/filter_type_all_qmmm/concat_2_grad/mod" + input: "gradients_1/filter_type_all_qmmm/concat_2_grad/ShapeN" + input: "gradients_1/filter_type_all_qmmm/concat_2_grad/ShapeN:1" + attr { + key: "N" + value { + i: 
2 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_2_grad/Slice" + op: "Slice" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/Reshape" + input: "gradients_1/filter_type_all_qmmm/concat_2_grad/ConcatOffset" + input: "gradients_1/filter_type_all_qmmm/concat_2_grad/ShapeN" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_2_grad/Slice_1" + op: "Slice" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/Reshape" + input: "gradients_1/filter_type_all_qmmm/concat_2_grad/ConcatOffset:1" + input: "gradients_1/filter_type_all_qmmm/concat_2_grad/ShapeN:1" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/Reshape_2_grad/Shape" + op: "Shape" + input: "filter_type_all_qmmm/Tanh_2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/Reshape_2_grad/Reshape" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_2_grad/Reshape_1" + input: "gradients_1/filter_type_all_qmmm/Reshape_2_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/Tanh_2_grad/TanhGrad" + op: "TanhGrad" + input: "filter_type_all_qmmm/Tanh_2" + input: "gradients_1/filter_type_all_qmmm/Reshape_2_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/MatMul_2_grad/MatMul" + op: "MatMul" + input: "gradients_1/filter_type_all_qmmm/Tanh_2_grad/TanhGrad" + input: "filter_type_all_qmmm/matrix_3/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients_1/AddN_4" + op: "AddN" + input: "gradients_1/filter_type_all_qmmm/concat_2_grad/Slice" + input: "gradients_1/filter_type_all_qmmm/concat_2_grad/Slice_1" + input: "gradients_1/filter_type_all_qmmm/MatMul_2_grad/MatMul" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients_1/filter_type_all_qmmm/concat_2_grad/Slice" + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_1_grad/Shape" + op: "Shape" + input: "filter_type_all_qmmm/concat_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_1_grad/Shape_1" + op: "Shape" + input: "filter_type_all_qmmm/Reshape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_1_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + 
input: "gradients_1/filter_type_all_qmmm/add_1_grad/Shape" + input: "gradients_1/filter_type_all_qmmm/add_1_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_1_grad/Sum" + op: "Sum" + input: "gradients_1/AddN_4" + input: "gradients_1/filter_type_all_qmmm/add_1_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_1_grad/Reshape" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_1_grad/Sum" + input: "gradients_1/filter_type_all_qmmm/add_1_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_1_grad/Sum_1" + op: "Sum" + input: "gradients_1/AddN_4" + input: "gradients_1/filter_type_all_qmmm/add_1_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_1_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_1_grad/Sum_1" + input: "gradients_1/filter_type_all_qmmm/add_1_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_1_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_1_grad/mod" + op: "FloorMod" + input: "filter_type_all_qmmm/concat_1/axis" + input: "gradients_1/filter_type_all_qmmm/concat_1_grad/Rank" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_1_grad/ShapeN" + op: "ShapeN" + input: "filter_type_all_qmmm/add" + input: "filter_type_all_qmmm/add" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_1_grad/ConcatOffset" + op: "ConcatOffset" + input: "gradients_1/filter_type_all_qmmm/concat_1_grad/mod" + input: "gradients_1/filter_type_all_qmmm/concat_1_grad/ShapeN" + input: "gradients_1/filter_type_all_qmmm/concat_1_grad/ShapeN:1" + attr { + key: "N" + value { + i: 2 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_1_grad/Slice" + op: "Slice" + input: "gradients_1/filter_type_all_qmmm/add_1_grad/Reshape" + input: "gradients_1/filter_type_all_qmmm/concat_1_grad/ConcatOffset" + input: "gradients_1/filter_type_all_qmmm/concat_1_grad/ShapeN" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_1_grad/Slice_1" + op: "Slice" + input: "gradients_1/filter_type_all_qmmm/add_1_grad/Reshape" + input: "gradients_1/filter_type_all_qmmm/concat_1_grad/ConcatOffset:1" + input: "gradients_1/filter_type_all_qmmm/concat_1_grad/ShapeN:1" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: 
"gradients_1/filter_type_all_qmmm/Reshape_1_grad/Shape" + op: "Shape" + input: "filter_type_all_qmmm/Tanh_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/Reshape_1_grad/Reshape" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_1_grad/Reshape_1" + input: "gradients_1/filter_type_all_qmmm/Reshape_1_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/Tanh_1_grad/TanhGrad" + op: "TanhGrad" + input: "filter_type_all_qmmm/Tanh_1" + input: "gradients_1/filter_type_all_qmmm/Reshape_1_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/MatMul_1_grad/MatMul" + op: "MatMul" + input: "gradients_1/filter_type_all_qmmm/Tanh_1_grad/TanhGrad" + input: "filter_type_all_qmmm/matrix_2/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients_1/AddN_5" + op: "AddN" + input: "gradients_1/filter_type_all_qmmm/concat_1_grad/Slice" + input: "gradients_1/filter_type_all_qmmm/concat_1_grad/Slice_1" + input: "gradients_1/filter_type_all_qmmm/MatMul_1_grad/MatMul" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients_1/filter_type_all_qmmm/concat_1_grad/Slice" + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_grad/Shape" + op: "Shape" + input: "filter_type_all_qmmm/concat" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_grad/Shape_1" + op: "Shape" + input: "filter_type_all_qmmm/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/filter_type_all_qmmm/add_grad/Shape" + input: "gradients_1/filter_type_all_qmmm/add_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_grad/Sum" + op: "Sum" + input: "gradients_1/AddN_5" + input: "gradients_1/filter_type_all_qmmm/add_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_grad/Reshape" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_grad/Sum" + input: "gradients_1/filter_type_all_qmmm/add_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } 
+} +node { + name: "gradients_1/filter_type_all_qmmm/add_grad/Sum_1" + op: "Sum" + input: "gradients_1/AddN_5" + input: "gradients_1/filter_type_all_qmmm/add_grad/BroadcastGradientArgs:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/add_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_grad/Sum_1" + input: "gradients_1/filter_type_all_qmmm/add_grad/Shape_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_grad/mod" + op: "FloorMod" + input: "filter_type_all_qmmm/concat/axis" + input: "gradients_1/filter_type_all_qmmm/concat_grad/Rank" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_grad/ShapeN" + op: "ShapeN" + input: "Reshape_75" + input: "Reshape_75" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_grad/ConcatOffset" + op: "ConcatOffset" + input: "gradients_1/filter_type_all_qmmm/concat_grad/mod" + input: "gradients_1/filter_type_all_qmmm/concat_grad/ShapeN" + input: "gradients_1/filter_type_all_qmmm/concat_grad/ShapeN:1" + attr { + key: "N" + value { + i: 2 + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_grad/Slice" + op: "Slice" + input: "gradients_1/filter_type_all_qmmm/add_grad/Reshape" + input: "gradients_1/filter_type_all_qmmm/concat_grad/ConcatOffset" + input: "gradients_1/filter_type_all_qmmm/concat_grad/ShapeN" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/concat_grad/Slice_1" + op: "Slice" + input: "gradients_1/filter_type_all_qmmm/add_grad/Reshape" + input: "gradients_1/filter_type_all_qmmm/concat_grad/ConcatOffset:1" + input: "gradients_1/filter_type_all_qmmm/concat_grad/ShapeN:1" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/Reshape_grad/Shape" + op: "Shape" + input: "filter_type_all_qmmm/Tanh" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/Reshape_grad/Reshape" + op: "Reshape" + input: "gradients_1/filter_type_all_qmmm/add_grad/Reshape_1" + input: "gradients_1/filter_type_all_qmmm/Reshape_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } + attr { + key: "_has_manual_control_dependencies" + value { + b: true + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/Tanh_grad/TanhGrad" + op: "TanhGrad" + input: "filter_type_all_qmmm/Tanh" + input: 
"gradients_1/filter_type_all_qmmm/Reshape_grad/Reshape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "gradients_1/filter_type_all_qmmm/MatMul_grad/MatMul" + op: "MatMul" + input: "gradients_1/filter_type_all_qmmm/Tanh_grad/TanhGrad" + input: "filter_type_all_qmmm/matrix_1/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: true + } + } +} +node { + name: "gradients_1/AddN_6" + op: "AddN" + input: "gradients_1/filter_type_all_qmmm/concat_grad/Slice" + input: "gradients_1/filter_type_all_qmmm/concat_grad/Slice_1" + input: "gradients_1/filter_type_all_qmmm/MatMul_grad/MatMul" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients_1/filter_type_all_qmmm/concat_grad/Slice" + } + } + } +} +node { + name: "gradients_1/Reshape_75_grad/Shape" + op: "Shape" + input: "Slice_17" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_75_grad/Reshape" + op: "Reshape" + input: "gradients_1/AddN_6" + input: "gradients_1/Reshape_75_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_17_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients_1/Slice_17_grad/Shape" + op: "Shape" + input: "Slice_17" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Slice_17_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/Slice_17_grad/stack" + op: "Pack" + input: "gradients_1/Slice_17_grad/Rank" + input: "gradients_1/Slice_17_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients_1/Slice_17_grad/Reshape" + op: "Reshape" + input: "Slice_17/begin" + input: "gradients_1/Slice_17_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_17_grad/Shape_1" + op: "Shape" + input: "Reshape_74" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Slice_17_grad/sub" + op: "Sub" + input: "gradients_1/Slice_17_grad/Shape_1" + input: "gradients_1/Slice_17_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_17_grad/sub_1" + op: "Sub" + input: "gradients_1/Slice_17_grad/sub" + input: 
"Slice_17/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_17_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/Slice_17_grad/sub_1" + input: "gradients_1/Slice_17_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_17_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/Slice_17_grad/concat" + op: "ConcatV2" + input: "gradients_1/Slice_17_grad/Reshape" + input: "gradients_1/Slice_17_grad/Reshape_1" + input: "gradients_1/Slice_17_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_17_grad/Pad" + op: "Pad" + input: "gradients_1/Reshape_75_grad/Reshape" + input: "gradients_1/Slice_17_grad/concat" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Reshape_74_grad/Shape" + op: "Shape" + input: "Slice_16" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_74_grad/Reshape" + op: "Reshape" + input: "gradients_1/Slice_17_grad/Pad" + input: "gradients_1/Reshape_74_grad/Shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/AddN_7" + op: "AddN" + input: "gradients_1/Reshape_78_grad/Reshape" + input: "gradients_1/Reshape_74_grad/Reshape" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@gradients_1/Reshape_78_grad/Reshape" + } + } + } +} +node { + name: "gradients_1/Slice_16_grad/Rank" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 2 + } + } + } +} +node { + name: "gradients_1/Slice_16_grad/Shape" + op: "Shape" + input: "Slice_16" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Slice_16_grad/stack/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/Slice_16_grad/stack" + op: "Pack" + input: "gradients_1/Slice_16_grad/Rank" + input: "gradients_1/Slice_16_grad/stack/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "gradients_1/Slice_16_grad/Reshape" + op: "Reshape" + input: "Slice_16/begin" + input: "gradients_1/Slice_16_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + 
name: "gradients_1/Slice_16_grad/Shape_1" + op: "Shape" + input: "Cast_17" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Slice_16_grad/sub" + op: "Sub" + input: "gradients_1/Slice_16_grad/Shape_1" + input: "gradients_1/Slice_16_grad/Shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_16_grad/sub_1" + op: "Sub" + input: "gradients_1/Slice_16_grad/sub" + input: "Slice_16/begin" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_16_grad/Reshape_1" + op: "Reshape" + input: "gradients_1/Slice_16_grad/sub_1" + input: "gradients_1/Slice_16_grad/stack" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_16_grad/concat/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "gradients_1/Slice_16_grad/concat" + op: "ConcatV2" + input: "gradients_1/Slice_16_grad/Reshape" + input: "gradients_1/Slice_16_grad/Reshape_1" + input: "gradients_1/Slice_16_grad/concat/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Slice_16_grad/Pad" + op: "Pad" + input: "gradients_1/AddN_7" + input: "gradients_1/Slice_16_grad/concat" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tpaddings" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Cast_17_grad/Cast" + op: "Cast" + input: "gradients_1/Slice_16_grad/Pad" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "gradients_1/mul_22_grad/Shape" + op: "Shape" + input: "Reshape_66" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/mul_22_grad/Shape_1" + op: "Shape" + input: "Reshape_72" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/mul_22_grad/BroadcastGradientArgs" + op: "BroadcastGradientArgs" + input: "gradients_1/mul_22_grad/Shape" + input: "gradients_1/mul_22_grad/Shape_1" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/mul_22_grad/Mul" + op: "Mul" + input: "gradients_1/Cast_17_grad/Cast" + input: "Reshape_72" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "gradients_1/mul_22_grad/Sum" + op: "Sum" + input: "gradients_1/mul_22_grad/Mul" + input: "gradients_1/mul_22_grad/BroadcastGradientArgs" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } + attr { + key: "keep_dims" + value { + b: false + } + } +} +node { + name: 
"gradients_1/mul_22_grad/Reshape" + op: "Reshape" + input: "gradients_1/mul_22_grad/Sum" + input: "gradients_1/mul_22_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Reshape_66_grad/Shape" + op: "Shape" + input: "Reshape_65" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_66_grad/Reshape" + op: "Reshape" + input: "gradients_1/mul_22_grad/Reshape" + input: "gradients_1/Reshape_66_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "gradients_1/Reshape_65_grad/Shape" + op: "Shape" + input: "o_rmat_qmmm" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "gradients_1/Reshape_65_grad/Reshape" + op: "Reshape" + input: "gradients_1/Reshape_66_grad/Reshape" + input: "gradients_1/Reshape_65_grad/Shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_78/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_78/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_78/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_78" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_78/stack" + input: "strided_slice_78/stack_1" + input: "strided_slice_78/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_26/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 108 + } + } + } +} +node { + name: "mul_26" + op: "Mul" + input: "strided_slice_78" + input: "mul_26/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_94/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_94/shape" + op: "Pack" + input: "Reshape_94/shape/0" + input: "mul_26" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + 
value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_94" + op: "Reshape" + input: "gradients_1/Reshape_65_grad/Reshape" + input: "Reshape_94/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "ProdForceSeA_1" + op: "ProdForceSeA" + input: "Reshape_94" + input: "o_rmat_deriv_qmmm" + input: "o_nlist_qmmm" + input: "DprcPairwiseIdx:5" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "n_a_sel" + value { + i: 27 + } + } + attr { + key: "n_r_sel" + value { + i: 0 + } + } +} +node { + name: "ProdVirialSeA_1" + op: "ProdVirialSeA" + input: "Reshape_94" + input: "o_rmat_deriv_qmmm" + input: "o_rij_qmmm" + input: "o_nlist_qmmm" + input: "DprcPairwiseIdx:5" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "n_a_sel" + value { + i: 27 + } + } + attr { + key: "n_r_sel" + value { + i: 0 + } + } +} +node { + name: "strided_slice_79/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_79/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_79/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_79" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_79/stack" + input: "strided_slice_79/stack_1" + input: "strided_slice_79/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_27/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_27" + op: "Mul" + input: "mul_27/x" + input: "strided_slice_79" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Reshape_95/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Reshape_95/shape" + op: "Pack" + input: "Reshape_95/shape/0" + input: "mul_27" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_95" + op: "Reshape" + input: "ProdForceSeA_1" + input: "Reshape_95/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_80/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + 
tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_80/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_80/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_80" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_80/stack" + input: "strided_slice_80/stack_1" + input: "strided_slice_80/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_28/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_28" + op: "Mul" + input: "mul_28/x" + input: "strided_slice_80" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "o_force_qmmm/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "o_force_qmmm/shape" + op: "Pack" + input: "o_force_qmmm/shape/0" + input: "mul_28" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_force_qmmm" + op: "Reshape" + input: "Reshape_95" + input: "o_force_qmmm/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "o_virial_qmmm/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\t\000\000\000" + } + } + } +} +node { + name: "o_virial_qmmm" + op: "Reshape" + input: "ProdVirialSeA_1" + input: "o_virial_qmmm/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_81/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_81/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_81/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_81" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + 
input: "strided_slice_81/stack" + input: "strided_slice_81/stack_1" + input: "strided_slice_81/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_29/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 9 + } + } + } +} +node { + name: "mul_29" + op: "Mul" + input: "mul_29/x" + input: "strided_slice_81" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "o_atom_virial_qmmm/shape/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "o_atom_virial_qmmm/shape" + op: "Pack" + input: "o_atom_virial_qmmm/shape/0" + input: "mul_29" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_atom_virial_qmmm" + op: "Reshape" + input: "ProdVirialSeA_1:1" + input: "o_atom_virial_qmmm/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "SegmentSum" + op: "SegmentSum" + input: "o_energy_qmmm" + input: "DprcPairwiseIdx:6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } +} +node { + name: "add_15" + op: "AddV2" + input: "o_energy_qm" + input: "SegmentSum" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_energy" + op: "Identity" + input: "add_15" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "strided_slice_82/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_82/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_82/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_82" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_82/stack" + input: "strided_slice_82/stack_1" + input: "strided_slice_82/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_96/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { 
+ tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "Reshape_96/shape" + op: "Pack" + input: "strided_slice" + input: "strided_slice_82" + input: "Reshape_96/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_96" + op: "Reshape" + input: "o_force_qm" + input: "Reshape_96/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_25" + op: "Shape" + input: "Reshape_96" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_83/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_83/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_83/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_83" + op: "StridedSlice" + input: "Shape_25" + input: "strided_slice_83/stack" + input: "strided_slice_83/stack_1" + input: "strided_slice_83/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_26" + op: "Shape" + input: "Reshape_96" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_84/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_84/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_84/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_84" + op: "StridedSlice" + input: "Shape_26" + input: "strided_slice_84/stack" + input: "strided_slice_84/stack_1" + input: "strided_slice_84/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + 
} + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "concat_17/values_0/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_17/values_0" + op: "Pack" + input: "strided_slice_83" + input: "concat_17/values_0/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "concat_17/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat_17" + op: "ConcatV2" + input: "concat_17/values_0" + input: "strided_slice_84" + input: "concat_17/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Fill_4/value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } +} +node { + name: "Fill_4" + op: "Fill" + input: "concat_17" + input: "Fill_4/value" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_27" + op: "Cast" + input: "Fill_4" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "concat_18/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_18" + op: "ConcatV2" + input: "Cast_27" + input: "Reshape_96" + input: "concat_18/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "add_16/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "add_16" + op: "AddV2" + input: "DprcPairwiseIdx:1" + input: "add_16/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GatherV2_7/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "GatherV2_7" + op: "GatherV2" + input: "concat_18" + input: "add_16" + input: "GatherV2_7/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "batch_dims" + value { + i: 1 + } + } +} +node { + name: "strided_slice_85/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { 
+ dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_85/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_85/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_85" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_85/stack" + input: "strided_slice_85/stack_1" + input: "strided_slice_85/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_97/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "Reshape_97/shape" + op: "Pack" + input: "strided_slice_4" + input: "strided_slice_85" + input: "Reshape_97/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_97" + op: "Reshape" + input: "o_force_qmmm" + input: "Reshape_97/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_27" + op: "Shape" + input: "Reshape_97" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_86/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_86/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_86/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_86" + op: "StridedSlice" + input: "Shape_27" + input: "strided_slice_86/stack" + input: "strided_slice_86/stack_1" + input: "strided_slice_86/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_28" + op: "Shape" + input: 
"Reshape_97" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_87/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_87/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_87/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_87" + op: "StridedSlice" + input: "Shape_28" + input: "strided_slice_87/stack" + input: "strided_slice_87/stack_1" + input: "strided_slice_87/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "concat_19/values_0/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_19/values_0" + op: "Pack" + input: "strided_slice_86" + input: "concat_19/values_0/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "concat_19/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat_19" + op: "ConcatV2" + input: "concat_19/values_0" + input: "strided_slice_87" + input: "concat_19/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Fill_5/value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } +} +node { + name: "Fill_5" + op: "Fill" + input: "concat_19" + input: "Fill_5/value" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_28" + op: "Cast" + input: "Fill_5" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "concat_20/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_20" + op: "ConcatV2" + input: "Cast_28" + input: "Reshape_97" + input: 
"concat_20/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "add_17/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "add_17" + op: "AddV2" + input: "DprcPairwiseIdx:3" + input: "add_17/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GatherV2_8/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "GatherV2_8" + op: "GatherV2" + input: "concat_20" + input: "add_17" + input: "GatherV2_8/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "batch_dims" + value { + i: 1 + } + } +} +node { + name: "SegmentSum_1" + op: "SegmentSum" + input: "GatherV2_8" + input: "DprcPairwiseIdx:6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } +} +node { + name: "add_18" + op: "AddV2" + input: "GatherV2_7" + input: "SegmentSum_1" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "strided_slice_88/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_88/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_88/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_88" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_88/stack" + input: "strided_slice_88/stack_1" + input: "strided_slice_88/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_30/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 3 + } + } + } +} +node { + name: "mul_30" + op: "Mul" + input: "mul_30/x" + input: "strided_slice_88" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "o_force/shape" + op: "Pack" + input: "strided_slice" + input: "mul_30" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_force" + op: "Reshape" + input: "add_18" + input: "o_force/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: 
"Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "SegmentSum_2" + op: "SegmentSum" + input: "o_virial_qmmm" + input: "DprcPairwiseIdx:6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } +} +node { + name: "add_19" + op: "AddV2" + input: "o_virial_qm" + input: "SegmentSum_2" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_virial" + op: "Identity" + input: "add_19" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "strided_slice_89/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_89/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_89/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_89" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_89/stack" + input: "strided_slice_89/stack_1" + input: "strided_slice_89/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Slice_24/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_24/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_24/size" + op: "Pack" + input: "Slice_24/size/0" + input: "strided_slice_89" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_24" + op: "Slice" + input: "DprcPairwiseIdx:1" + input: "Slice_24/begin" + input: "Slice_24/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "strided_slice_90/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_90/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_90/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim 
{ + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_90" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_90/stack" + input: "strided_slice_90/stack_1" + input: "strided_slice_90/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Slice_25/begin" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Slice_25/size/0" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: -1 + } + } + } +} +node { + name: "Slice_25/size" + op: "Pack" + input: "Slice_25/size/0" + input: "strided_slice_90" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Slice_25" + op: "Slice" + input: "DprcPairwiseIdx:3" + input: "Slice_25/begin" + input: "Slice_25/size" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_29" + op: "Shape" + input: "o_atom_energy_qm" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_91/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_91/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_91/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_91" + op: "StridedSlice" + input: "Shape_29" + input: "strided_slice_91/stack" + input: "strided_slice_91/stack_1" + input: "strided_slice_91/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_30" + op: "Shape" + input: "o_atom_energy_qm" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: 
TFT_INT32 + } + } + } +} +node { + name: "strided_slice_92/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_92/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_92/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_92" + op: "StridedSlice" + input: "Shape_30" + input: "strided_slice_92/stack" + input: "strided_slice_92/stack_1" + input: "strided_slice_92/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "concat_21/values_0/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_21/values_0" + op: "Pack" + input: "strided_slice_91" + input: "concat_21/values_0/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "concat_21/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat_21" + op: "ConcatV2" + input: "concat_21/values_0" + input: "strided_slice_92" + input: "concat_21/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Fill_6/value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } +} +node { + name: "Fill_6" + op: "Fill" + input: "concat_21" + input: "Fill_6/value" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_29" + op: "Cast" + input: "Fill_6" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "concat_22/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_22" + op: "ConcatV2" + input: "Cast_29" + input: "o_atom_energy_qm" + input: "concat_22/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "add_20/y" + op: "Const" + attr { + 
key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "add_20" + op: "AddV2" + input: "Slice_24" + input: "add_20/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GatherV2_9/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "GatherV2_9" + op: "GatherV2" + input: "concat_22" + input: "add_20" + input: "GatherV2_9/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "batch_dims" + value { + i: 1 + } + } +} +node { + name: "Shape_31" + op: "Shape" + input: "o_atom_energy_qmmm" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_93/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_93/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_93/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_93" + op: "StridedSlice" + input: "Shape_31" + input: "strided_slice_93/stack" + input: "strided_slice_93/stack_1" + input: "strided_slice_93/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_32" + op: "Shape" + input: "o_atom_energy_qmmm" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_94/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_94/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_94/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + 
int_val: 1 + } + } + } +} +node { + name: "strided_slice_94" + op: "StridedSlice" + input: "Shape_32" + input: "strided_slice_94/stack" + input: "strided_slice_94/stack_1" + input: "strided_slice_94/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "concat_23/values_0/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_23/values_0" + op: "Pack" + input: "strided_slice_93" + input: "concat_23/values_0/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "concat_23/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat_23" + op: "ConcatV2" + input: "concat_23/values_0" + input: "strided_slice_94" + input: "concat_23/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Fill_7/value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } +} +node { + name: "Fill_7" + op: "Fill" + input: "concat_23" + input: "Fill_7/value" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_30" + op: "Cast" + input: "Fill_7" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "concat_24/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_24" + op: "ConcatV2" + input: "Cast_30" + input: "o_atom_energy_qmmm" + input: "concat_24/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "add_21/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "add_21" + op: "AddV2" + input: "Slice_25" + input: "add_21/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GatherV2_10/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "GatherV2_10" + op: "GatherV2" + input: "concat_24" + input: "add_21" + input: "GatherV2_10/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + 
} + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "batch_dims" + value { + i: 1 + } + } +} +node { + name: "SegmentSum_3" + op: "SegmentSum" + input: "GatherV2_10" + input: "DprcPairwiseIdx:6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } +} +node { + name: "add_22" + op: "AddV2" + input: "GatherV2_9" + input: "SegmentSum_3" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "o_atom_energy" + op: "Identity" + input: "add_22" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "strided_slice_95/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_95/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_95/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_95" + op: "StridedSlice" + input: "DprcPairwiseIdx:4" + input: "strided_slice_95/stack" + input: "strided_slice_95/stack_1" + input: "strided_slice_95/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_98/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 9 + } + } + } +} +node { + name: "Reshape_98/shape" + op: "Pack" + input: "strided_slice" + input: "strided_slice_95" + input: "Reshape_98/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_98" + op: "Reshape" + input: "o_atom_virial_qm" + input: "Reshape_98/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_33" + op: "Shape" + input: "Reshape_98" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_96/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_96/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_96/stack_2" + op: "Const" + attr { + 
key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_96" + op: "StridedSlice" + input: "Shape_33" + input: "strided_slice_96/stack" + input: "strided_slice_96/stack_1" + input: "strided_slice_96/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_34" + op: "Shape" + input: "Reshape_98" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_97/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_97/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_97/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_97" + op: "StridedSlice" + input: "Shape_34" + input: "strided_slice_97/stack" + input: "strided_slice_97/stack_1" + input: "strided_slice_97/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "concat_25/values_0/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_25/values_0" + op: "Pack" + input: "strided_slice_96" + input: "concat_25/values_0/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "concat_25/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat_25" + op: "ConcatV2" + input: "concat_25/values_0" + input: "strided_slice_97" + input: "concat_25/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Fill_8/value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: 
DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } +} +node { + name: "Fill_8" + op: "Fill" + input: "concat_25" + input: "Fill_8/value" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_31" + op: "Cast" + input: "Fill_8" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "concat_26/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_26" + op: "ConcatV2" + input: "Cast_31" + input: "Reshape_98" + input: "concat_26/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "add_23/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "add_23" + op: "AddV2" + input: "DprcPairwiseIdx:1" + input: "add_23/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GatherV2_11/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "GatherV2_11" + op: "GatherV2" + input: "concat_26" + input: "add_23" + input: "GatherV2_11/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "batch_dims" + value { + i: 1 + } + } +} +node { + name: "strided_slice_98/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_98/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_98/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_98" + op: "StridedSlice" + input: "DprcPairwiseIdx:5" + input: "strided_slice_98/stack" + input: "strided_slice_98/stack_1" + input: "strided_slice_98/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Reshape_99/shape/2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 9 + } + } + } +} +node { + name: "Reshape_99/shape" + op: "Pack" + input: 
"strided_slice_4" + input: "strided_slice_98" + input: "Reshape_99/shape/2" + attr { + key: "N" + value { + i: 3 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "Reshape_99" + op: "Reshape" + input: "o_atom_virial_qmmm" + input: "Reshape_99/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "Shape_35" + op: "Shape" + input: "Reshape_99" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_99/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_99/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_99/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_99" + op: "StridedSlice" + input: "Shape_35" + input: "strided_slice_99/stack" + input: "strided_slice_99/stack_1" + input: "strided_slice_99/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "Shape_36" + op: "Shape" + input: "Reshape_99" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "out_type" + value { + type: DT_INT32 + } + } + experimental_type { + type_id: TFT_PRODUCT + args { + type_id: TFT_SHAPE_TENSOR + args { + type_id: TFT_INT32 + } + } + } +} +node { + name: "strided_slice_100/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_100/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 0 + } + } + } +} +node { + name: "strided_slice_100/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_100" + op: "StridedSlice" + input: "Shape_36" + input: "strided_slice_100/stack" + input: "strided_slice_100/stack_1" + input: "strided_slice_100/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } 
+ attr { + key: "end_mask" + value { + i: 1 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 0 + } + } +} +node { + name: "concat_27/values_0/1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_27/values_0" + op: "Pack" + input: "strided_slice_99" + input: "concat_27/values_0/1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "concat_27/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } +} +node { + name: "concat_27" + op: "ConcatV2" + input: "concat_27/values_0" + input: "strided_slice_100" + input: "concat_27/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "Fill_9/value" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } +} +node { + name: "Fill_9" + op: "Fill" + input: "concat_27" + input: "Fill_9/value" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "index_type" + value { + type: DT_INT32 + } + } +} +node { + name: "Cast_32" + op: "Cast" + input: "Fill_9" + attr { + key: "DstT" + value { + type: DT_DOUBLE + } + } + attr { + key: "SrcT" + value { + type: DT_FLOAT + } + } + attr { + key: "Truncate" + value { + b: false + } + } +} +node { + name: "concat_28/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "concat_28" + op: "ConcatV2" + input: "Cast_32" + input: "Reshape_99" + input: "concat_28/axis" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tidx" + value { + type: DT_INT32 + } + } +} +node { + name: "add_24/y" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "add_24" + op: "AddV2" + input: "DprcPairwiseIdx:3" + input: "add_24/y" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "GatherV2_12/axis" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } +} +node { + name: "GatherV2_12" + op: "GatherV2" + input: "concat_28" + input: "add_24" + input: "GatherV2_12/axis" + attr { + key: "Taxis" + value { + type: DT_INT32 + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } + attr { + key: "Tparams" + value { + type: DT_DOUBLE + } + } + attr { + key: "batch_dims" + value { + i: 1 + } + } +} +node { + name: "SegmentSum_4" + op: "SegmentSum" + input: "GatherV2_12" + input: "DprcPairwiseIdx:6" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tindices" + value { + type: DT_INT32 + } + } +} +node { + name: "add_25" + op: "AddV2" + input: "GatherV2_11" + input: 
"SegmentSum_4" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } +} +node { + name: "strided_slice_101/stack" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_101/stack_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 2 + } + } + } +} +node { + name: "strided_slice_101/stack_2" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 1 + } + } + } +} +node { + name: "strided_slice_101" + op: "StridedSlice" + input: "t_natoms" + input: "strided_slice_101/stack" + input: "strided_slice_101/stack_1" + input: "strided_slice_101/stack_2" + attr { + key: "Index" + value { + type: DT_INT32 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "begin_mask" + value { + i: 0 + } + } + attr { + key: "ellipsis_mask" + value { + i: 0 + } + } + attr { + key: "end_mask" + value { + i: 0 + } + } + attr { + key: "new_axis_mask" + value { + i: 0 + } + } + attr { + key: "shrink_axis_mask" + value { + i: 1 + } + } +} +node { + name: "mul_31/x" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 9 + } + } + } +} +node { + name: "mul_31" + op: "Mul" + input: "mul_31/x" + input: "strided_slice_101" + attr { + key: "T" + value { + type: DT_INT32 + } + } +} +node { + name: "o_atom_virial/shape" + op: "Pack" + input: "strided_slice" + input: "mul_31" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "axis" + value { + i: 0 + } + } +} +node { + name: "o_atom_virial" + op: "Reshape" + input: "add_25" + input: "o_atom_virial/shape" + attr { + key: "T" + value { + type: DT_DOUBLE + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +library { +} +versions { + producer: 1395 +} diff --git a/source/tests/test_pairwise_dprc.py b/source/tests/test_pairwise_dprc.py index 04aaa237b1..7fbb4fdf19 100644 --- a/source/tests/test_pairwise_dprc.py +++ b/source/tests/test_pairwise_dprc.py @@ -357,6 +357,187 @@ def test_model_ener(self): self.assertAllClose(e[1] + e[2] + e[3] - 3 * e[0], e[4] - e[0]) self.assertAllClose(f[1] + f[2] + f[3] - 3 * f[0], f[4] - f[0]) + def test_nloc(self): + jfile = tests_path / "pairwise_dprc.json" + jdata = j_loader(jfile) + model = Model(**jdata["model"]) + + sys = dpdata.LabeledSystem() + sys.data["atom_names"] = ["C", "N", "O", "H", "OW", "HW"] + sys.data["coords"] = np.array( + [ + 2.48693, + -0.12642, + 0.45320, + 3.86292, + -0.00082, + 0.07286, + 4.19135, + 0.35148, + -1.21253, + 3.35886, + 0.58875, + -2.08423, + 5.67422, + 0.44076, + -1.45160, + 2.40712, + -0.32538, + 1.52137, + 2.04219, + -0.93912, + -0.12445, + 1.98680, + 0.81574, + 0.21261, + 4.57186, + -0.33026, + 0.71127, + 6.24532, + 0.18814, + -0.55212, + 5.92647, + 1.46447, + -1.74069, + 5.95030, + -0.25321, + -2.24804, + -0.32794, + 1.50468, + 0.83176, + 0.23662, + 2.24068, + 1.13166, + -0.24528, + 1.59132, + -0.14907, + -0.50371, + -1.24800, + -0.05601, + -0.28305, + -1.84629, + 0.67555, + -0.68673, + -0.40535, + 0.41384, + 0.38397, + 0.80987, + 
                -1.90358,
+                1.30191,
+                0.68503,
+                -2.22909,
+                0.11626,
+                -0.11276,
+                -1.70506,
+            ]
+        ).reshape(1, 21, 3)
+        sys.data["atom_types"] = np.array(
+            [0, 1, 0, 2, 0, 3, 3, 3, 3, 3, 3, 3, 4, 5, 5, 4, 5, 5, 4, 5, 5]
+        )
+        sys.data["cells"] = np.array([np.eye(3) * 30])
+        nframes = 1
+        natoms = 21
+        sys.data["coords"] = sys.data["coords"].reshape([nframes, natoms, 3])
+        sys.data["cells"] = sys.data["cells"].reshape([nframes, 3, 3])
+        sys.data["energies"] = np.ones(
+            [
+                nframes,
+            ]
+        )
+        sys.data["forces"] = np.zeros([nframes, natoms, 3])
+        sys.data["nopbc"] = True
+        sys.to_deepmd_npy("system", prec=np.float64)
+        idxs = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])
+        np.save("system/set.000/aparam.npy", idxs)
+
+        systems = j_must_have(jdata["training"]["training_data"], "systems")
+        batch_size = 1
+        test_size = 1
+        rcut = model.get_rcut()
+
+        data = DeepmdDataSystem(systems, batch_size, test_size, rcut)
+        data.add("energy", 1, atomic=False, must=True, high_prec=True)
+        data.add("aparam", 1, atomic=True, must=True, high_prec=True)
+        test_data = data.get_test()
+
+        t_energy = tf.placeholder(GLOBAL_ENER_FLOAT_PRECISION, [None], name="t_energy")
+        t_coord = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_coord")
+        t_type = tf.placeholder(tf.int32, [None], name="i_type")
+        t_natoms = tf.placeholder(tf.int32, [model.get_ntypes() + 2], name="i_natoms")
+        t_box = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None, 9], name="i_box")
+        t_mesh = tf.placeholder(tf.int32, [None], name="i_mesh")
+        is_training = tf.placeholder(tf.bool)
+        t_aparam = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_aparam")
+        input_dict = {}
+        input_dict["aparam"] = t_aparam
+
+        model.data_stat(data)
+        model_pred = model.build(
+            t_coord,
+            t_type,
+            t_natoms,
+            t_box,
+            t_mesh,
+            input_dict,
+            suffix="se_a_atom_ener_0",
+            reuse=False,
+        )
+        energy = model_pred["energy"]
+        force = model_pred["force"]
+        virial = model_pred["virial"]
+
+        test_types = np.array(
+            [
+                [0, 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5, 5],
+            ]
+        )
+        nloc1 = 17
+        # aparam: [[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 2. 3. 1. 1. 2. 2. 3. 3.]]
+        feed_dict_test = {
+            t_energy: np.reshape(test_data["energy"], [-1]),
+            t_coord: np.reshape(test_data["coord"], [-1]),
+            t_box: np.reshape(test_data["box"], (1, 9)),
+            t_type: np.reshape(test_types, [-1]),
+            t_natoms: [nloc1, 21, nloc1, 0, 0, 0, 0, 0],
+            t_mesh: test_data["default_mesh"],
+            t_aparam: np.reshape(test_data["aparam"], [-1]),
+            is_training: False,
+        }
+        sess = self.cached_session().__enter__()
+        sess.run(tf.global_variables_initializer())
+        [e1, f1, v1] = sess.run([energy, force, virial], feed_dict=feed_dict_test)
+
+        idx_map = np.concatenate([np.arange(nloc1, 21), np.arange(nloc1)])
+        idx_map_inv = np.argsort(idx_map)
+        feed_dict_test = {
+            t_energy: np.reshape(test_data["energy"], [-1]),
+            t_coord: np.reshape(np.reshape(test_data["coord"], [-1, 3])[idx_map], [-1]),
+            t_box: np.reshape(test_data["box"], (1, 9)),
+            t_type: np.reshape(test_types, [-1])[idx_map],
+            t_natoms: [21 - nloc1, 21, 21 - nloc1, 0, 0, 0, 0, 0],
+            t_mesh: test_data["default_mesh"],
+            t_aparam: np.reshape(test_data["aparam"], [-1])[idx_map],
+            is_training: False,
+        }
+        [e2, f2, v2] = sess.run([energy, force, virial], feed_dict=feed_dict_test)
+        f2 = np.reshape(np.reshape(f2, [-1, 3])[idx_map_inv], f2.shape)
+
+        feed_dict_test = {
+            t_energy: np.reshape(test_data["energy"], [-1]),
+            t_coord: np.reshape(test_data["coord"], [-1]),
+            t_box: np.reshape(test_data["box"], (1, 9)),
+            t_type: np.reshape(test_types, [-1]),
+            t_natoms: [21, 21, 21, 0, 0, 0, 0, 0],
+            t_mesh: test_data["default_mesh"],
+            t_aparam: np.reshape(test_data["aparam"], [-1]),
+            is_training: False,
+        }
+        [e3, f3, v3] = sess.run([energy, force, virial], feed_dict=feed_dict_test)
+
+        np.testing.assert_allclose(e1 + e2, e3, 6)
+        np.testing.assert_allclose(f1 + f2, f3, 6)
+        np.testing.assert_allclose(v1 + v2, v3, 6)
+
+
 def _init_models():
     system = dpdata.LabeledSystem()